Merge pull request #1545 from SteadBytes/contrib-python3
Move Contrib Scripts to Python3
This commit is contained in:
@@ -1,7 +1,6 @@
|
|||||||
language: python
|
language: python
|
||||||
dist: xenial
|
dist: xenial
|
||||||
python:
|
python:
|
||||||
- "3.4"
|
|
||||||
- "3.5"
|
- "3.5"
|
||||||
- "3.6"
|
- "3.6"
|
||||||
- "3.7"
|
- "3.7"
|
||||||
@@ -14,6 +13,7 @@ before_install:
|
|||||||
install:
|
install:
|
||||||
- pip install -q pillow
|
- pip install -q pillow
|
||||||
- pip install -q numpy
|
- pip install -q numpy
|
||||||
|
- pip install -q networkx
|
||||||
- python3 setup.py build
|
- python3 setup.py build
|
||||||
before_script:
|
before_script:
|
||||||
- git clone git://github.com/overviewer/Minecraft-Overviewer-Addons.git ~/mcoa/
|
- git clone git://github.com/overviewer/Minecraft-Overviewer-Addons.git ~/mcoa/
|
||||||
|
|||||||
185
contrib/contributors.py
Executable file → Normal file
185
contrib/contributors.py
Executable file → Normal file
@@ -1,4 +1,4 @@
|
|||||||
#!/usr/bin/python2
|
#!/usr/bin/env python3
|
||||||
"""Update the contributor list
|
"""Update the contributor list
|
||||||
|
|
||||||
Alias handling is done by git with .mailmap
|
Alias handling is done by git with .mailmap
|
||||||
@@ -6,110 +6,133 @@ New contributors are merged in the short-term list.
|
|||||||
Moving them to a "higher" list should be a manual process.
|
Moving them to a "higher" list should be a manual process.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
import fileinput
|
import re
|
||||||
from subprocess import Popen, PIPE
|
from pathlib import Path
|
||||||
|
import subprocess
|
||||||
|
|
||||||
|
CONTRIB_FILE_CONTRIBUTOR_RE = re.compile(r'\* (.+) (<.+>)')
|
||||||
|
|
||||||
|
|
||||||
def format_contributor(contributor):
|
def format_contributor(contributor):
|
||||||
return " * {0} {1}".format(
|
return " * {0} {1}".format(contributor["name"], contributor["email"])
|
||||||
" ".join(contributor["name"]),
|
|
||||||
contributor["email"])
|
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def get_contributors():
|
||||||
# generate list of contributors
|
""" Parse all contributors from output of git shortlog -se
|
||||||
|
"""
|
||||||
contributors = []
|
contributors = []
|
||||||
p_git = Popen(["git", "shortlog", "-se"], stdout=PIPE)
|
p_git = subprocess.run(["git", "shortlog", "-se"], stdout=subprocess.PIPE)
|
||||||
for line in p_git.stdout:
|
for line in p_git.stdout.decode('utf-8').split('\n'):
|
||||||
|
m = re.search(r"(\d+)\t(.+) (<.+>)", line)
|
||||||
|
if m:
|
||||||
contributors.append({
|
contributors.append({
|
||||||
'count': int(line.split("\t")[0].strip()),
|
"count": int(m.group(1)),
|
||||||
'name': line.split("\t")[1].split()[0:-1],
|
"name": m.group(2),
|
||||||
'email': line.split("\t")[1].split()[-1]
|
"email": m.group(3)
|
||||||
})
|
})
|
||||||
|
return contributors
|
||||||
|
|
||||||
# cache listed contributors
|
|
||||||
|
def get_old_contributors(contrib_file_lines):
|
||||||
|
""" Parse existing contributors from CONTRIBUTORS.rst
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
(list) Contributors as {"name", "email"} dicts
|
||||||
|
"""
|
||||||
old_contributors = []
|
old_contributors = []
|
||||||
with open("CONTRIBUTORS.rst", "r") as contrib_file:
|
for line in contrib_file_lines:
|
||||||
for line in contrib_file:
|
m = CONTRIB_FILE_CONTRIBUTOR_RE.search(line)
|
||||||
if "@" in line:
|
if m:
|
||||||
old_contributors.append({
|
old_contributors.append({"name": m.group(1), "email": m.group(2)})
|
||||||
'name': line.split()[1:-1],
|
return old_contributors
|
||||||
'email': line.split()[-1]
|
|
||||||
})
|
|
||||||
|
|
||||||
old = map(lambda x: (x['name'], x['email']), old_contributors)
|
|
||||||
old_emails = map(lambda x: x['email'], old_contributors)
|
|
||||||
old_names = map(lambda x: x['name'], old_contributors)
|
|
||||||
|
|
||||||
# check which contributors are new
|
def get_new_contributors(contributors, old_contributors):
|
||||||
|
""" Find new contributors and any possible alias or email changes
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
(tuple) list of new contributors,
|
||||||
|
list of new aliases as (contributor, existing_name),
|
||||||
|
list of new emails as (contributor, existing_email)
|
||||||
|
"""
|
||||||
|
old_email_names = {c['email']: c['name'] for c in old_contributors}
|
||||||
|
old_name_emails = {c['name']: c['email'] for c in old_contributors}
|
||||||
new_contributors = []
|
new_contributors = []
|
||||||
update_mailmap = False
|
new_alias = []
|
||||||
|
new_email = []
|
||||||
for contributor in contributors:
|
for contributor in contributors:
|
||||||
if (contributor['name'], contributor['email']) in old:
|
name, email = contributor['name'], contributor['email']
|
||||||
# this exact combination already in the list
|
existing_name, existing_email = old_email_names.get(email), old_name_emails.get(name)
|
||||||
|
|
||||||
|
if existing_name == name and existing_email == email:
|
||||||
|
# exact combination already in list
|
||||||
pass
|
pass
|
||||||
elif (contributor['email'] not in old_emails
|
elif existing_name is None and existing_email is None:
|
||||||
and contributor['name'] not in old_names):
|
|
||||||
# name AND email are not in the list
|
|
||||||
new_contributors.append(contributor)
|
new_contributors.append(contributor)
|
||||||
elif contributor['email'] in old_emails:
|
elif existing_name is not None:
|
||||||
# email is listed, but with another name
|
new_alias.append((contributor, existing_name))
|
||||||
old_name = filter(lambda x: x['email'] == contributor['email'],
|
elif existing_email is not None:
|
||||||
old_contributors)[0]['name']
|
new_email.append((contributor, existing_email))
|
||||||
print "new alias %s for %s %s ?" % (
|
return (
|
||||||
" ".join(contributor['name']),
|
sorted(new_contributors, key=lambda x: x['name'].split()[-1].lower()),
|
||||||
" ".join(old_name),
|
new_alias,
|
||||||
contributor['email'])
|
new_email
|
||||||
update_mailmap = True
|
)
|
||||||
elif contributor['name'] in old_names:
|
|
||||||
# probably a new email for a previous contributor
|
|
||||||
other_mail = filter(lambda x: x['name'] == contributor['name'],
|
|
||||||
old_contributors)[0]['email']
|
|
||||||
print "new email %s for %s %s ?" % (
|
|
||||||
contributor['email'],
|
|
||||||
" ".join(contributor['name']),
|
|
||||||
other_mail)
|
|
||||||
update_mailmap = True
|
|
||||||
if update_mailmap:
|
|
||||||
print "Please update .mailmap"
|
|
||||||
|
|
||||||
# sort on the last word of the name
|
|
||||||
new_contributors = sorted(new_contributors,
|
|
||||||
key=lambda x: x['name'][-1].lower())
|
|
||||||
|
|
||||||
# show new contributors to be merged to the list
|
def merge_short_term_contributors(contrib_file_lines, new_contributors):
|
||||||
if new_contributors:
|
""" Merge new contributors into Short-term Contributions section in
|
||||||
print "inserting:"
|
alphabetical order.
|
||||||
for contributor in new_contributors:
|
|
||||||
print format_contributor(contributor)
|
|
||||||
|
|
||||||
# merge with alphabetical (by last part of name) contributor list
|
Returns:
|
||||||
i = 0
|
(list) Lines including new contributors for writing to CONTRIBUTORS.rst
|
||||||
|
"""
|
||||||
short_term_found = False
|
short_term_found = False
|
||||||
for line in fileinput.input("CONTRIBUTORS.rst", inplace=1):
|
for (i, line) in enumerate(contrib_file_lines):
|
||||||
if not short_term_found:
|
if not short_term_found:
|
||||||
print line,
|
|
||||||
if "Short-term" in line:
|
if "Short-term" in line:
|
||||||
short_term_found = True
|
short_term_found = True
|
||||||
else:
|
else:
|
||||||
if i >= len(new_contributors) or "@" not in line:
|
if CONTRIB_FILE_CONTRIBUTOR_RE.search(line):
|
||||||
print line,
|
|
||||||
else:
|
|
||||||
listed_name = line.split()[-2].lower()
|
|
||||||
contributor = new_contributors[i]
|
|
||||||
# insert all new contributors that fit here
|
|
||||||
while listed_name > contributor["name"][-1].lower():
|
|
||||||
print format_contributor(contributor)
|
|
||||||
i += 1
|
|
||||||
if i < len(new_contributors):
|
|
||||||
contributor = new_contributors[i]
|
|
||||||
else:
|
|
||||||
break
|
break
|
||||||
print line,
|
|
||||||
# append remaining contributors
|
short_term_contributor_lines = [l for l in contrib_file_lines[i:] if l] + \
|
||||||
with open("CONTRIBUTORS.rst", "a") as contrib_file:
|
[format_contributor(c) + "\n" for c in new_contributors]
|
||||||
while i < len(new_contributors):
|
|
||||||
contrib_file.write(format_contributor(new_contributors[i]) + "\n")
|
def last_name_sort(contrib_line):
|
||||||
i += 1
|
m = CONTRIB_FILE_CONTRIBUTOR_RE.search(contrib_line)
|
||||||
|
return m.group(1).split()[-1].lower()
|
||||||
|
|
||||||
|
return contrib_file_lines[:i] + sorted(short_term_contributor_lines, key=last_name_sort)
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
contrib_file = Path("CONTRIBUTORS.rst")
|
||||||
|
with contrib_file.open() as f:
|
||||||
|
contrib_file_lines = f.readlines()
|
||||||
|
|
||||||
|
old_contributors = get_old_contributors(contrib_file_lines)
|
||||||
|
|
||||||
|
contributors = get_contributors()
|
||||||
|
new_contributors, new_alias, new_email = get_new_contributors(contributors, old_contributors)
|
||||||
|
|
||||||
|
for contributor, old_name in new_alias:
|
||||||
|
print("new alias {0} for {1} {2} ?".format(
|
||||||
|
contributor['name'], old_name, contributor['email']))
|
||||||
|
|
||||||
|
for contributor, old_email in new_email:
|
||||||
|
print("new email {0} for {1} {2} ?".format(
|
||||||
|
contributor['email'], contributor['name'], old_email))
|
||||||
|
|
||||||
|
if new_alias or new_email:
|
||||||
|
print("Please update .mailmap")
|
||||||
|
|
||||||
|
if new_contributors:
|
||||||
|
print("inserting:")
|
||||||
|
print("\n".join([format_contributor(c) for c in new_contributors]))
|
||||||
|
|
||||||
|
with contrib_file.open("w") as f:
|
||||||
|
f.writelines(merge_short_term_contributors(contrib_file_lines, new_contributors))
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
|
|||||||
@@ -1,92 +1,93 @@
|
|||||||
#!/usr/bin/python
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
"""Convert gibberish back into Cyrillic"""
|
"""Convert gibberish back into Cyrillic"""
|
||||||
|
|
||||||
import fileinput
|
import fileinput
|
||||||
import os
|
import argparse
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
usage = """
|
|
||||||
If you have signs that should be Cyrillic, but are instead gibberish,
|
|
||||||
this script will convert it back to proper Cyrillic.
|
|
||||||
|
|
||||||
usage: python %(script)s <markers.js>
|
|
||||||
ex. python %(script)s C:\\Inetpub\\www\\map\\markers.js
|
|
||||||
or %(script)s /srv/http/map/markers.js
|
|
||||||
""" % {'script': os.path.basename(sys.argv[0])}
|
|
||||||
|
|
||||||
if len(sys.argv) < 2:
|
|
||||||
sys.exit(usage)
|
|
||||||
|
|
||||||
gibberish_to_cyrillic = {
|
gibberish_to_cyrillic = {
|
||||||
r"\u00c0": r"\u0410",
|
'À': 'А',
|
||||||
r"\u00c1": r"\u0411",
|
'Á': 'Б',
|
||||||
r"\u00c2": r"\u0412",
|
'Â': 'В',
|
||||||
r"\u00c3": r"\u0413",
|
'Ã': 'Г',
|
||||||
r"\u00c4": r"\u0414",
|
'Ä': 'Д',
|
||||||
r"\u00c5": r"\u0415",
|
'Å': 'Е',
|
||||||
r"\u00c6": r"\u0416",
|
'Æ': 'Ж',
|
||||||
r"\u00c7": r"\u0417",
|
'Ç': 'З',
|
||||||
r"\u00c8": r"\u0418",
|
'È': 'И',
|
||||||
r"\u00c9": r"\u0419",
|
'É': 'Й',
|
||||||
r"\u00ca": r"\u041a",
|
'Ê': 'К',
|
||||||
r"\u00cb": r"\u041b",
|
'Ë': 'Л',
|
||||||
r"\u00cc": r"\u041c",
|
'Ì': 'М',
|
||||||
r"\u00cd": r"\u041d",
|
'Í': 'Н',
|
||||||
r"\u00ce": r"\u041e",
|
'Î': 'О',
|
||||||
r"\u00cf": r"\u041f",
|
'Ï': 'П',
|
||||||
r"\u00d0": r"\u0420",
|
'Ð': 'Р',
|
||||||
r"\u00d1": r"\u0421",
|
'Ñ': 'С',
|
||||||
r"\u00d2": r"\u0422",
|
'Ò': 'Т',
|
||||||
r"\u00d3": r"\u0423",
|
'Ó': 'У',
|
||||||
r"\u00d4": r"\u0424",
|
'Ô': 'Ф',
|
||||||
r"\u00d5": r"\u0425",
|
'Õ': 'Х',
|
||||||
r"\u00d6": r"\u0426",
|
'Ö': 'Ц',
|
||||||
r"\u00d7": r"\u0427",
|
'×': 'Ч',
|
||||||
r"\u00d8": r"\u0428",
|
'Ø': 'Ш',
|
||||||
r"\u00d9": r"\u0429",
|
'Ù': 'Щ',
|
||||||
r"\u00da": r"\u042a",
|
'Ú': 'Ъ',
|
||||||
r"\u00db": r"\u042b",
|
'Û': 'Ы',
|
||||||
r"\u00dc": r"\u042c",
|
'Ü': 'Ь',
|
||||||
r"\u00dd": r"\u042d",
|
'Ý': 'Э',
|
||||||
r"\u00de": r"\u042e",
|
'Þ': 'Ю',
|
||||||
r"\u00df": r"\u042f",
|
'ß': 'Я',
|
||||||
r"\u00e0": r"\u0430",
|
'à': 'а',
|
||||||
r"\u00e1": r"\u0431",
|
'á': 'б',
|
||||||
r"\u00e2": r"\u0432",
|
'â': 'в',
|
||||||
r"\u00e3": r"\u0433",
|
'ã': 'г',
|
||||||
r"\u00e4": r"\u0434",
|
'ä': 'д',
|
||||||
r"\u00e5": r"\u0435",
|
'å': 'е',
|
||||||
r"\u00e6": r"\u0436",
|
'æ': 'ж',
|
||||||
r"\u00e7": r"\u0437",
|
'ç': 'з',
|
||||||
r"\u00e8": r"\u0438",
|
'è': 'и',
|
||||||
r"\u00e9": r"\u0439",
|
'é': 'й',
|
||||||
r"\u00ea": r"\u043a",
|
'ê': 'к',
|
||||||
r"\u00eb": r"\u043b",
|
'ë': 'л',
|
||||||
r"\u00ec": r"\u043c",
|
'ì': 'м',
|
||||||
r"\u00ed": r"\u043d",
|
'í': 'н',
|
||||||
r"\u00ee": r"\u043e",
|
'î': 'о',
|
||||||
r"\u00ef": r"\u043f",
|
'ï': 'п',
|
||||||
r"\u00f0": r"\u0440",
|
'ð': 'р',
|
||||||
r"\u00f1": r"\u0441",
|
'ñ': 'с',
|
||||||
r"\u00f2": r"\u0442",
|
'ò': 'т',
|
||||||
r"\u00f3": r"\u0443",
|
'ó': 'у',
|
||||||
r"\u00f4": r"\u0444",
|
'ô': 'ф',
|
||||||
r"\u00f5": r"\u0445",
|
'õ': 'х',
|
||||||
r"\u00f6": r"\u0446",
|
'ö': 'ц',
|
||||||
r"\u00f7": r"\u0447",
|
'÷': 'ч',
|
||||||
r"\u00f8": r"\u0448",
|
'ø': 'ш',
|
||||||
r"\u00f9": r"\u0449",
|
'ù': 'щ',
|
||||||
r"\u00fa": r"\u044a",
|
'ú': 'ъ',
|
||||||
r"\u00fb": r"\u044b",
|
'û': 'ы',
|
||||||
r"\u00fc": r"\u044c",
|
'ü': 'ь',
|
||||||
r"\u00fd": r"\u044d",
|
'ý': 'э',
|
||||||
r"\u00fe": r"\u044e",
|
'þ': 'ю',
|
||||||
r"\u00ff": r"\u044f"
|
'ÿ': 'я'
|
||||||
}
|
}
|
||||||
|
trans_table = {ord(k): v for k, v in gibberish_to_cyrillic.items()}
|
||||||
|
|
||||||
for line in fileinput.FileInput(inplace=1):
|
|
||||||
for i, j in gibberish_to_cyrillic.iteritems():
|
|
||||||
line = line.replace(i, j)
|
|
||||||
sys.stdout.write(line)
|
|
||||||
|
|
||||||
|
def convert(s):
|
||||||
|
return s.translate(trans_table)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
description = """
|
||||||
|
If you have signs that should be Cyrillic, but are instead gibberish,
|
||||||
|
this script will convert it back to proper Cyrillic.
|
||||||
|
"""
|
||||||
|
parser = argparse.ArgumentParser(description=description)
|
||||||
|
parser.add_argument('file', metavar='markers.js')
|
||||||
|
|
||||||
|
args = parser.parse_args()
|
||||||
|
convert(args.markers_file)
|
||||||
|
for line in fileinput.input(files=markers_file, inplace=1):
|
||||||
|
print(convert(s), end='')
|
||||||
|
|||||||
@@ -1,40 +1,49 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
"""
|
"""
|
||||||
Outputs a huge image with all currently-supported block textures.
|
Outputs a huge image with all currently-supported block textures.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
from overviewer_core import textures
|
import argparse
|
||||||
|
|
||||||
|
from PIL import Image
|
||||||
import sys
|
import sys
|
||||||
import Image
|
import os
|
||||||
|
|
||||||
if len(sys.argv) != 2:
|
# incantation to be able to import overviewer_core
|
||||||
print "usage: %s [output.png]" % (sys.argv[0],)
|
if not hasattr(sys, "frozen"):
|
||||||
sys.exit(1)
|
sys.path.insert(0, os.path.abspath(os.path.join(os.path.split(__file__)[0], '..')))
|
||||||
|
|
||||||
t = textures.Textures()
|
|
||||||
t.generate()
|
|
||||||
|
|
||||||
blocks = {}
|
def main(outfile):
|
||||||
|
from overviewer_core import textures
|
||||||
|
t = textures.Textures()
|
||||||
|
t.generate()
|
||||||
|
|
||||||
for blockid in xrange(textures.max_blockid):
|
blocks = {}
|
||||||
for data in xrange(textures.max_data):
|
|
||||||
|
for blockid in range(textures.max_blockid):
|
||||||
|
for data in range(textures.max_data):
|
||||||
tex = t.blockmap[blockid * textures.max_data + data]
|
tex = t.blockmap[blockid * textures.max_data + data]
|
||||||
if tex:
|
if tex:
|
||||||
if not blockid in blocks:
|
if blockid not in blocks:
|
||||||
blocks[blockid] = {}
|
blocks[blockid] = {}
|
||||||
blocks[blockid][data] = tex
|
blocks[blockid][data] = tex
|
||||||
|
|
||||||
columns = max(map(len, blocks.values()))
|
columns = max(len(v) for v in blocks.values())
|
||||||
rows = len(blocks)
|
rows = len(blocks)
|
||||||
texsize = t.texture_size
|
texsize = t.texture_size
|
||||||
|
|
||||||
gallery = Image.new("RGBA", (columns * texsize, rows * texsize), t.bgcolor)
|
gallery = Image.new("RGBA", (columns * texsize, rows * texsize), t.bgcolor)
|
||||||
|
|
||||||
row = 0
|
for row, (blockid, textures) in enumerate(blocks.items()):
|
||||||
for blockid, textures in blocks.iteritems():
|
for column, (data, tex) in enumerate(textures.items()):
|
||||||
column = 0
|
|
||||||
for data, tex in textures.iteritems():
|
|
||||||
gallery.paste(tex[0], (column * texsize, row * texsize))
|
gallery.paste(tex[0], (column * texsize, row * texsize))
|
||||||
column += 1
|
|
||||||
row += 1
|
|
||||||
|
|
||||||
gallery.save(sys.argv[1])
|
gallery.save(outfile)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
parser = argparse.ArgumentParser(description=__doc__)
|
||||||
|
parser.add_argument('file', metavar='output.png')
|
||||||
|
args = parser.parse_args()
|
||||||
|
main(args.file)
|
||||||
|
|||||||
@@ -1,20 +1,21 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
"""
|
"""
|
||||||
Very basic player.dat inspection script
|
Very basic player.dat inspection script
|
||||||
"""
|
"""
|
||||||
|
|
||||||
from __future__ import print_function
|
|
||||||
|
|
||||||
import os
|
import os
|
||||||
import sys
|
import sys
|
||||||
|
import argparse
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
# incantation to be able to import overviewer_core
|
# incantation to be able to import overviewer_core
|
||||||
if not hasattr(sys, "frozen"):
|
if not hasattr(sys, "frozen"):
|
||||||
sys.path.insert(0, os.path.abspath(os.path.join(os.path.split(__file__)[0], '..')))
|
sys.path.insert(0, os.path.abspath(os.path.join(os.path.split(__file__)[0], '..')))
|
||||||
|
|
||||||
|
|
||||||
from overviewer_core.nbt import load
|
from overviewer_core.nbt import load
|
||||||
from overviewer_core import items
|
from overviewer_core import items
|
||||||
|
|
||||||
|
|
||||||
def print_player(data, sub_entry=False):
|
def print_player(data, sub_entry=False):
|
||||||
indent = ""
|
indent = ""
|
||||||
if sub_entry:
|
if sub_entry:
|
||||||
@@ -36,26 +37,58 @@ def print_player(data, sub_entry=False):
|
|||||||
print(" %-3d %s" % (item['Count'], items.id2item(item['id'])))
|
print(" %-3d %s" % (item['Count'], items.id2item(item['id'])))
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
def find_all_player_files(dir_path):
|
||||||
if len(sys.argv) < 2 or len(sys.argv) > 3:
|
for player_file in dir_path.iterdir():
|
||||||
print("Usage: {} <Player .dat or directory> [selected player]"
|
player = player_file.stem
|
||||||
.format(sys.argv[0]), file=sys.stderr)
|
yield player_file, player
|
||||||
sys.exit(1)
|
|
||||||
print("Inspecting %s" % sys.argv[1])
|
|
||||||
|
|
||||||
if os.path.isdir(sys.argv[1]):
|
|
||||||
directory = sys.argv[1]
|
def find_player_file(dir_path, selected_player):
|
||||||
if len(sys.argv) > 2:
|
for player_file, player in find_all_player_files(dir_path):
|
||||||
selected_player = sys.argv[2]
|
if selected_player == player:
|
||||||
else:
|
return player_file, player
|
||||||
selected_player = None
|
raise FileNotFoundError()
|
||||||
for player_file in os.listdir(directory):
|
|
||||||
player = player_file.split(".")[0]
|
|
||||||
if selected_player in [None, player]:
|
def load_and_output_player(player_file_path, player, sub_entry=False):
|
||||||
|
with player_file_path.open('rb') as f:
|
||||||
|
player_data = load(f)[1]
|
||||||
print("")
|
print("")
|
||||||
print(player)
|
print(player)
|
||||||
data = load(os.path.join(directory, player_file))[1]
|
print_player(player_data, sub_entry=sub_entry)
|
||||||
print_player(data, sub_entry=(selected_player is None))
|
|
||||||
else:
|
|
||||||
data = load(sys.argv[1])[1]
|
def dir_or_file(path):
|
||||||
print_player(data)
|
p = Path(path)
|
||||||
|
if not p.is_file() and not p.is_dir():
|
||||||
|
raise argparse.ArgumentTypeError("Not a valid file or directory path")
|
||||||
|
return p
|
||||||
|
|
||||||
|
|
||||||
|
def main(path, selected_player=None):
|
||||||
|
print("Inspecting %s" % args.path)
|
||||||
|
|
||||||
|
if not path.is_dir():
|
||||||
|
load_and_output_player(args.path)
|
||||||
|
return
|
||||||
|
|
||||||
|
if selected_player is None:
|
||||||
|
for player_file, player in find_all_player_files(args.path):
|
||||||
|
load_and_output_player(player_file, player)
|
||||||
|
return
|
||||||
|
|
||||||
|
try:
|
||||||
|
player_file, player = find_player_file(args.path, args.selected_player)
|
||||||
|
load_and_output_player(player_file, player, sub_entry=True)
|
||||||
|
except FileNotFoundError:
|
||||||
|
print("No %s.dat in %s" % (args.selected_player, args.path))
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
parser = argparse.ArgumentParser(description=__doc__)
|
||||||
|
parser.add_argument('path', metavar='<Player.dat or directory>', type=dir_or_file)
|
||||||
|
parser.add_argument('selected_player', nargs='?', default=None)
|
||||||
|
|
||||||
|
args = parser.parse_args()
|
||||||
|
main(args.path, selected_player=args.selected_player)
|
||||||
|
|||||||
@@ -1,40 +1,51 @@
|
|||||||
#!/usr/bin/env python
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
"""Deletes outlying and unconnected regions"""
|
"""Deletes outlying and unconnected regions"""
|
||||||
|
|
||||||
|
import argparse
|
||||||
import logging
|
import logging
|
||||||
import os
|
from pathlib import Path
|
||||||
import sys
|
|
||||||
import glob
|
|
||||||
|
|
||||||
import networkx
|
import networkx
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
logger.setLevel(logging.DEBUG)
|
logger.setLevel(logging.DEBUG)
|
||||||
|
|
||||||
|
|
||||||
def get_region_file_from_node(regionset_path, node):
|
def get_region_file_from_node(regionset_path, node):
|
||||||
return os.path.join(regionset_path, 'r.%d.%d.mca' % node)
|
return regionset_path / ('r.%d.%d.mca' % node)
|
||||||
|
|
||||||
|
|
||||||
def get_nodes(regionset_path):
|
def get_nodes(regionset_path):
|
||||||
return [tuple(map(int, r.split('.')[1:3])) \
|
return [
|
||||||
for r in glob.glob(os.path.join(regionset_path, 'r.*.*.mca'))]
|
tuple(int(x) for x in r.stem.split('.')[1:3])
|
||||||
|
for r in regionset_path.glob('r.*.*.mca')
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
def generate_edges(graph):
|
def generate_edges(graph):
|
||||||
offsets = (-1, 1)
|
offsets = (-1, 1)
|
||||||
nodes = graph.nodes()
|
nodes = graph.nodes()
|
||||||
for node in nodes:
|
for node in nodes:
|
||||||
for offset in offsets:
|
for offset in offsets:
|
||||||
graph.add_edges_from((node, offset_node) for offset_node in \
|
graph.add_edges_from(
|
||||||
[(node[0] + offset, node[1]), (node[0], node[1] + offset), \
|
(node, offset_node)
|
||||||
(node[0] + offset, node[1] + offset)] \
|
for offset_node in [
|
||||||
if offset_node in nodes)
|
(node[0] + offset, node[1]),
|
||||||
|
(node[0], node[1] + offset),
|
||||||
|
(node[0] + offset, node[1] + offset),
|
||||||
|
]
|
||||||
|
if offset_node in nodes
|
||||||
|
)
|
||||||
return graph
|
return graph
|
||||||
|
|
||||||
|
|
||||||
def generate_subgraphs(nodes):
|
def generate_subgraphs(nodes):
|
||||||
graph = networkx.Graph()
|
graph = networkx.Graph()
|
||||||
graph.add_nodes_from(nodes)
|
graph.add_nodes_from(nodes)
|
||||||
generate_edges(graph)
|
generate_edges(graph)
|
||||||
return graph, networkx.connected_component_subgraphs(graph)
|
return graph, [graph.subgraph(c) for c in networkx.connected_components(graph)]
|
||||||
|
|
||||||
|
|
||||||
def get_graph_bounds(graph):
|
def get_graph_bounds(graph):
|
||||||
nodes = graph.nodes()
|
nodes = graph.nodes()
|
||||||
@@ -45,114 +56,130 @@ def get_graph_bounds(graph):
|
|||||||
min(n[1] for n in nodes),
|
min(n[1] for n in nodes),
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
def get_graph_center_by_bounds(bounds):
|
def get_graph_center_by_bounds(bounds):
|
||||||
dx = bounds[0] - bounds[1]
|
dx = bounds[0] - bounds[1]
|
||||||
dy = bounds[2] - bounds[3]
|
dy = bounds[2] - bounds[3]
|
||||||
return (dx / 2 + bounds[1], dy / 2 + bounds[3])
|
return (dx // 2 + bounds[1], dy // 2 + bounds[3])
|
||||||
|
|
||||||
def main(*args, **options):
|
|
||||||
if len(args) < 1:
|
def trim_regions(graph, regions_path, dry_run=True, filter_func=lambda n: True):
|
||||||
logger.error('Missing region directory argument')
|
regions = [
|
||||||
return
|
(n, get_region_file_from_node(regions_path, n))
|
||||||
for path in args:
|
for n in graph.nodes()
|
||||||
logger.info('Using regionset path: %s', path)
|
if filter_func(n)
|
||||||
|
]
|
||||||
|
logger.info("Trimming regions: %s", ", ".join(x[1] for x in regions))
|
||||||
|
for n, region_file in regions:
|
||||||
|
graph.remove_node(n)
|
||||||
|
if dry_run is False:
|
||||||
|
unlink_file(region_file)
|
||||||
|
|
||||||
|
|
||||||
|
def is_outside_main(center, main_section_bounds):
|
||||||
|
return center[0] <= main_section_bounds[0] and center[0] >= main_section_bounds[1] and \
|
||||||
|
center[1] <= main_section_bounds[2] and center[1] >= main_section_bounds[3]
|
||||||
|
|
||||||
|
|
||||||
|
def is_outside_bounds(node, trim_center, trim_bounds):
|
||||||
|
return node[0] >= trim_center[0] + trim_bounds[0] or \
|
||||||
|
node[0] <= trim_center[0] - trim_bounds[0] or \
|
||||||
|
node[1] >= trim_center[1] + trim_bounds[1] or \
|
||||||
|
node[1] <= trim_center[1] - trim_bounds[1]
|
||||||
|
|
||||||
|
def unlink_file(path):
|
||||||
|
try:
|
||||||
|
path.unlink()
|
||||||
|
except OSError as err:
|
||||||
|
logger.warning("Unable to delete file: %s", path)
|
||||||
|
logger.warning("Error recieved was: %s", err)
|
||||||
|
|
||||||
|
|
||||||
|
def main(args):
|
||||||
|
for path in args.paths:
|
||||||
|
logger.info("Using regionset path: %s", path)
|
||||||
nodes = get_nodes(path)
|
nodes = get_nodes(path)
|
||||||
if not len(nodes):
|
if not len(nodes):
|
||||||
logger.error('Found no nodes, are you sure there are .mca files in %s ?',
|
logger.error("Found no nodes, are you sure there are .mca files in %s ?",
|
||||||
path)
|
path)
|
||||||
return
|
return
|
||||||
logger.info('Found %d nodes', len(nodes))
|
logger.info("Found %d nodes", len(nodes))
|
||||||
logger.info('Generating graphing nodes...')
|
logger.info("Generating graphing nodes...")
|
||||||
graph, subgraphs = generate_subgraphs(nodes)
|
graph, subgraphs = generate_subgraphs(nodes)
|
||||||
assert len(graph.nodes()) == sum(len(sg.nodes()) for sg in subgraphs)
|
assert len(graph.nodes()) == sum(len(sg.nodes()) for sg in subgraphs)
|
||||||
if len(subgraphs) == 1:
|
if len(subgraphs) == 1:
|
||||||
logger.warn('All regions are contiguous, the needful is done!')
|
logger.warning("All regions are contiguous, the needful is done!")
|
||||||
return
|
return
|
||||||
logger.info('Found %d discrete region sections', len(subgraphs))
|
logger.info("Found %d discrete region sections", len(subgraphs))
|
||||||
subgraphs = sorted(subgraphs, key=lambda sg: len(sg), reverse=True)
|
subgraphs = sorted(subgraphs, key=lambda sg: len(sg), reverse=True)
|
||||||
for i, sg in enumerate(subgraphs):
|
for i, sg in enumerate(subgraphs):
|
||||||
logger.info('Region section #%02d: %04d nodes', i+1, len(sg.nodes()))
|
logger.info("Region section #%02d: %04d nodes", i + 1, len(sg.nodes()))
|
||||||
bounds = get_graph_bounds(sg)
|
bounds = get_graph_bounds(sg)
|
||||||
logger.info('Bounds: %d <-> %d x %d <-> %d', *get_graph_bounds(sg))
|
logger.info("Bounds: %d <-> %d x %d <-> %d", *get_graph_bounds(sg))
|
||||||
center = get_graph_center_by_bounds(bounds)
|
center = get_graph_center_by_bounds(bounds)
|
||||||
logger.info('Center: %d x %d', *center)
|
logger.info("Center: %d x %d", *center)
|
||||||
|
|
||||||
main_section = subgraphs[0]
|
main_section = subgraphs[0]
|
||||||
main_section_bounds = get_graph_bounds(main_section)
|
main_section_bounds = get_graph_bounds(main_section)
|
||||||
main_section_center = get_graph_center_by_bounds(main_section_bounds)
|
main_section_center = get_graph_center_by_bounds(main_section_bounds)
|
||||||
logger.info('Using %d node graph as main section,', len(main_section.nodes()))
|
logger.info("Using %d node graph as main section,", len(main_section.nodes()))
|
||||||
satellite_sections = subgraphs[1:]
|
satellite_sections = subgraphs[1:]
|
||||||
for ss in satellite_sections:
|
for ss in satellite_sections:
|
||||||
bounds = get_graph_bounds(ss)
|
bounds = get_graph_bounds(ss)
|
||||||
center = get_graph_center_by_bounds(bounds)
|
center = get_graph_center_by_bounds(bounds)
|
||||||
logger.info('Checking satellite section with %d nodes, %d <-> %d x %d <-> %d bounds and %d x %d center',
|
logger.info(("Checking satellite section with %d nodes, "
|
||||||
|
"%d <-> %d x %d <-> %d bounds and %d x %d center"),
|
||||||
len(ss.nodes()), *(bounds + center))
|
len(ss.nodes()), *(bounds + center))
|
||||||
if options['trim_disconnected']:
|
|
||||||
logger.info('Trimming regions: %s', ', '.join(
|
if args.trim_disconnected:
|
||||||
get_region_file_from_node(path, n) for n in ss.nodes()))
|
trim_regions(ss, path, dry_run=args.dry_run)
|
||||||
for n, region_file in ((n, get_region_file_from_node(path, n)) \
|
|
||||||
for n in ss.nodes()):
|
if args.trim_outside_main:
|
||||||
ss.remove_node(n)
|
if is_outside_main(ss, center, main_section_bounds):
|
||||||
if not options['dry_run']:
|
logger.info("Section is outside main section bounds")
|
||||||
unlink_file(region_file)
|
trim_regions(ss, path, dry_run=args.dry_run)
|
||||||
if options['trim_outside_main']:
|
|
||||||
if center[0] <= main_section_bounds[0] and center[0] >= main_section_bounds[1] and \
|
|
||||||
center[1] <= main_section_bounds[2] and center[1] >= main_section_bounds[3]:
|
|
||||||
logger.info('Section falls inside main section bounds, ignoring')
|
|
||||||
else:
|
else:
|
||||||
logger.info('Section is outside main section bounds')
|
logger.info("Section falls inside main section bounds, ignoring")
|
||||||
logger.info('Trimming regions: %s', ', '.join(
|
|
||||||
get_region_file_from_node(path, n) for n in ss.nodes()))
|
if args.trim_outside_bounds:
|
||||||
for n, region_file in ((n, get_region_file_from_node(path, n)) \
|
logger.info("Checking regions outside specified bounds")
|
||||||
for n in ss.nodes()):
|
trim_center = args.trim_outside_bounds.get("center", main_section_center)
|
||||||
ss.remove_node(n)
|
trim_bounds = args.trim_outside_bounds["bounds"]
|
||||||
if not options['dry_run']:
|
trim_regions(ss, path, dry_run=args.dry_run,
|
||||||
unlink_file(region_file)
|
filter_func=lambda n: is_outside_bounds(n, trim_center, trim_bounds))
|
||||||
if options['trim_outside_bounds']:
|
|
||||||
x = map(int, options['trim_outside_bounds'].split(','))
|
|
||||||
|
def dir_path(path):
|
||||||
|
p = Path(path)
|
||||||
|
if not p.is_dir():
|
||||||
|
raise argparse.ArgumentTypeError("Not a valid directory path")
|
||||||
|
return p
|
||||||
|
|
||||||
|
|
||||||
|
def center_bound(value):
|
||||||
|
x = [int(v) for v in value.split(",")]
|
||||||
if len(x) == 4:
|
if len(x) == 4:
|
||||||
trim_center = x[:2]
|
return {"center": x[:2], "bounds": x[2:]}
|
||||||
trim_bounds = x[2:]
|
|
||||||
elif len(x) == 2:
|
elif len(x) == 2:
|
||||||
trim_center = main_section_center
|
return {"bounds": x}
|
||||||
trim_bounds = x
|
|
||||||
else:
|
else:
|
||||||
logger.error('Invalid center/bound value: %s',
|
raise argparse.ArgumentTypeError("Invalid center/bound value")
|
||||||
options['trim_outside_bounds'])
|
|
||||||
continue
|
|
||||||
for node in ss.nodes():
|
|
||||||
if node[0] >= trim_center[0] + trim_bounds[0] or \
|
|
||||||
node[0] <= trim_center[0] - trim_bounds[0] or \
|
|
||||||
node[1] >= trim_center[1] + trim_bounds[1] or \
|
|
||||||
node[1] <= trim_center[1] - trim_bounds[1]:
|
|
||||||
region_file = get_region_file_from_node(path, node)
|
|
||||||
logger.info('Region falls outside specified bounds, trimming: %s',
|
|
||||||
region_file)
|
|
||||||
ss.remove_node(node)
|
|
||||||
if not options['dry_run']:
|
|
||||||
unlink_file(region_file)
|
|
||||||
|
|
||||||
def unlink_file(path):
|
|
||||||
try:
|
|
||||||
os.unlink(path)
|
|
||||||
except OSError as err:
|
|
||||||
logger.warn('Unable to delete file: %s', path)
|
|
||||||
logger.warn('Error recieved was: %s', err)
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == "__main__":
|
||||||
import optparse
|
|
||||||
logging.basicConfig()
|
logging.basicConfig()
|
||||||
parser = optparse.OptionParser(
|
|
||||||
usage='Usage: %prog [options] <path/to/region/directory>')
|
parser = argparse.ArgumentParser(description=__doc__)
|
||||||
parser.add_option('-D', '--trim-disconnected', action='store_true', default=False,
|
parser.add_argument("paths", metavar="<path/to/region/directory>", nargs="+", type=dir_path)
|
||||||
help='Trim all disconnected regions')
|
parser.add_argument("-D", "--trim-disconnected", action="store_true",
|
||||||
parser.add_option('-M', '--trim-outside-main', action='store_true', default=False,
|
default=False, help="Trim all disconnected regions")
|
||||||
help='Trim disconnected regions outside main section bounds')
|
parser.add_argument("-M", "--trim-outside-main", action="store_true",
|
||||||
parser.add_option('-B', '--trim-outside-bounds', default=False,
|
default=False, help="Trim disconnected regions outside main section bounds")
|
||||||
metavar='[center_X,center_Y,]bound_X,bound_Y',
|
parser.add_argument("-B", "--trim-outside-bounds",
|
||||||
help='Trim outside given bounds (given as [center_X,center_Y,]bound_X,bound_Y)')
|
metavar="[center_X,center_Y,]bound_X,bound_Y", type=center_bound,
|
||||||
parser.add_option('-n', '--dry-run', action='store_true', default=False,
|
help=("Trim outside given bounds "
|
||||||
help='Don\'t actually delete anything')
|
"(given as [center_X,center_Y,]bound_X,bound_Y)"))
|
||||||
opts, args = parser.parse_args()
|
parser.add_argument("-n", "--dry-run", action="store_true", default=False,
|
||||||
main(*args, **vars(opts))
|
help="Don't actually delete anything")
|
||||||
|
args = parser.parse_args()
|
||||||
|
main(args)
|
||||||
|
|||||||
@@ -1,69 +0,0 @@
|
|||||||
#!/usr/bin/python
|
|
||||||
|
|
||||||
'''
|
|
||||||
Generate a region list to rerender certain chunks
|
|
||||||
|
|
||||||
This is used to force the regeneration of any chunks that contain a certain
|
|
||||||
blockID. The output is a chunklist file that is suitable to use with the
|
|
||||||
--chunklist option to overviewer.py.
|
|
||||||
|
|
||||||
Example:
|
|
||||||
|
|
||||||
python contrib/rerenderBlocks.py --ids=46,79,91 --world=world/> regionlist.txt
|
|
||||||
python overviewer.py --regionlist=regionlist.txt world/ output_dir/
|
|
||||||
|
|
||||||
This will rerender any chunks that contain either TNT (46), Ice (79), or
|
|
||||||
a Jack-O-Lantern (91)
|
|
||||||
'''
|
|
||||||
|
|
||||||
from optparse import OptionParser
|
|
||||||
import sys,os
|
|
||||||
import re
|
|
||||||
|
|
||||||
# incantation to be able to import overviewer_core
|
|
||||||
if not hasattr(sys, "frozen"):
|
|
||||||
sys.path.insert(0, os.path.abspath(os.path.join(os.path.split(__file__)[0], '..')))
|
|
||||||
|
|
||||||
from overviewer_core import nbt
|
|
||||||
from overviewer_core import world
|
|
||||||
from overviewer_core.chunk import get_blockarray
|
|
||||||
|
|
||||||
parser = OptionParser()
|
|
||||||
parser.add_option("--ids", dest="ids", type="string")
|
|
||||||
parser.add_option("--world", dest="world", type="string")
|
|
||||||
|
|
||||||
|
|
||||||
options, args = parser.parse_args()
|
|
||||||
|
|
||||||
if not options.world or not options.ids:
|
|
||||||
parser.print_help()
|
|
||||||
sys.exit(1)
|
|
||||||
|
|
||||||
if not os.path.exists(options.world):
|
|
||||||
raise Exception("%s does not exist" % options.world)
|
|
||||||
|
|
||||||
ids = map(lambda x: int(x),options.ids.split(","))
|
|
||||||
sys.stderr.write("Searching for these blocks: %r...\n" % ids)
|
|
||||||
|
|
||||||
|
|
||||||
matcher = re.compile(r"^r\..*\.mcr$")
|
|
||||||
|
|
||||||
for dirpath, dirnames, filenames in os.walk(options.world):
|
|
||||||
for f in filenames:
|
|
||||||
if matcher.match(f):
|
|
||||||
full = os.path.join(dirpath, f)
|
|
||||||
r = nbt.load_region(full, 'lower-left')
|
|
||||||
chunks = r.get_chunks()
|
|
||||||
found = False
|
|
||||||
for x,y in chunks:
|
|
||||||
chunk = r.load_chunk(x,y).read_all()
|
|
||||||
blocks = get_blockarray(chunk[1]['Level'])
|
|
||||||
for i in ids:
|
|
||||||
if chr(i) in blocks:
|
|
||||||
print full
|
|
||||||
found = True
|
|
||||||
break
|
|
||||||
if found:
|
|
||||||
break
|
|
||||||
|
|
||||||
|
|
||||||
@@ -1,43 +1,47 @@
|
|||||||
#!/usr/bin/python
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
"Test Render Script"
|
"Test Render Script"
|
||||||
|
|
||||||
import os, shutil, tempfile, time, sys, math, re
|
import argparse
|
||||||
from subprocess import Popen, PIPE, STDOUT, CalledProcessError
|
import math
|
||||||
from optparse import OptionParser
|
import os
|
||||||
|
import re
|
||||||
|
import shutil
|
||||||
|
import sys
|
||||||
|
import tempfile
|
||||||
|
import time
|
||||||
|
from shlex import split
|
||||||
|
from subprocess import PIPE, STDOUT, CalledProcessError, run
|
||||||
|
|
||||||
overviewer_scripts = ['./overviewer.py', './gmap.py']
|
overviewer_scripts = ['./overviewer.py', './gmap.py']
|
||||||
|
|
||||||
def check_call(*args, **kwargs):
|
|
||||||
quiet = False
|
|
||||||
if "quiet" in kwargs.keys():
|
|
||||||
quiet = kwargs["quiet"]
|
|
||||||
del kwargs["quiet"]
|
|
||||||
if quiet:
|
|
||||||
kwargs['stdout'] = PIPE
|
|
||||||
kwargs['stderr'] = STDOUT
|
|
||||||
p = Popen(*args, **kwargs)
|
|
||||||
output = ""
|
|
||||||
if quiet:
|
|
||||||
while p.poll() == None:
|
|
||||||
output += p.communicate()[0]
|
|
||||||
returncode = p.wait()
|
|
||||||
if returncode:
|
|
||||||
if quiet:
|
|
||||||
print output
|
|
||||||
raise CalledProcessError(returncode, args)
|
|
||||||
return returncode
|
|
||||||
|
|
||||||
def check_output(*args, **kwargs):
|
def check_call(args, verbose=False):
|
||||||
kwargs['stdout'] = PIPE
|
try:
|
||||||
# will hang for HUGE output... you were warned
|
return run(
|
||||||
p = Popen(*args, **kwargs)
|
args,
|
||||||
returncode = p.wait()
|
check=True,
|
||||||
if returncode:
|
stdout=None if verbose else PIPE,
|
||||||
raise CalledProcessError(returncode, args)
|
stderr=None if verbose else STDOUT,
|
||||||
return p.communicate()[0]
|
universal_newlines=True,
|
||||||
|
)
|
||||||
|
except CalledProcessError as e:
|
||||||
|
if verbose:
|
||||||
|
print(e.output)
|
||||||
|
raise e
|
||||||
|
|
||||||
def clean_render(overviewerargs, quiet):
|
|
||||||
|
def check_output(args):
|
||||||
|
p = run(
|
||||||
|
args,
|
||||||
|
check=True,
|
||||||
|
stdout=PIPE,
|
||||||
|
universal_newlines=True
|
||||||
|
)
|
||||||
|
return p.stdout
|
||||||
|
|
||||||
|
|
||||||
|
def clean_render(overviewerargs, verbose=False):
|
||||||
tempdir = tempfile.mkdtemp('mc-overviewer-test')
|
tempdir = tempfile.mkdtemp('mc-overviewer-test')
|
||||||
overviewer_script = None
|
overviewer_script = None
|
||||||
for script in overviewer_scripts:
|
for script in overviewer_scripts:
|
||||||
@@ -50,112 +54,121 @@ def clean_render(overviewerargs, quiet):
|
|||||||
|
|
||||||
try:
|
try:
|
||||||
# check_call raises CalledProcessError when overviewer.py exits badly
|
# check_call raises CalledProcessError when overviewer.py exits badly
|
||||||
check_call([sys.executable, 'setup.py', 'clean', 'build'], quiet=quiet)
|
check_call([sys.executable] + split("setup.py clean build"), verbose=verbose)
|
||||||
try:
|
try:
|
||||||
check_call([sys.executable, overviewer_script, '-d'] + overviewerargs, quiet=quiet)
|
check_call([sys.executable, overviewer_script, '-d'] + overviewerargs, verbose=verbose)
|
||||||
except CalledProcessError:
|
except CalledProcessError:
|
||||||
pass
|
pass
|
||||||
starttime = time.time()
|
starttime = time.time()
|
||||||
check_call([sys.executable, overviewer_script,] + overviewerargs + [tempdir,], quiet=quiet)
|
check_call([sys.executable, overviewer_script] +
|
||||||
|
overviewerargs + [tempdir, ], verbose=verbose)
|
||||||
endtime = time.time()
|
endtime = time.time()
|
||||||
|
|
||||||
return endtime - starttime
|
return endtime - starttime
|
||||||
finally:
|
finally:
|
||||||
shutil.rmtree(tempdir, True)
|
shutil.rmtree(tempdir, True)
|
||||||
|
|
||||||
|
|
||||||
def get_stats(timelist):
|
def get_stats(timelist):
|
||||||
stats = {}
|
average = sum(timelist) / float(len(timelist))
|
||||||
|
meandiff = [(x - average) ** 2 for x in timelist]
|
||||||
|
sd = math.sqrt(sum(meandiff) / len(meandiff))
|
||||||
|
return {
|
||||||
|
"count": len(timelist),
|
||||||
|
"minimum": min(timelist),
|
||||||
|
"maximum": max(timelist),
|
||||||
|
"average": average,
|
||||||
|
"standard deviation": sd
|
||||||
|
}
|
||||||
|
|
||||||
stats['count'] = len(timelist)
|
|
||||||
stats['minimum'] = min(timelist)
|
|
||||||
stats['maximum'] = max(timelist)
|
|
||||||
stats['average'] = sum(timelist) / float(len(timelist))
|
|
||||||
|
|
||||||
meandiff = map(lambda x: (x - stats['average'])**2, timelist)
|
def get_current_branch():
|
||||||
stats['standard deviation'] = math.sqrt(sum(meandiff) / float(len(meandiff)))
|
gittext = check_output(split('git rev-parse --abbrev-ref HEAD'))
|
||||||
|
return gittext.strip() if gittext != "HEAD" else None
|
||||||
|
|
||||||
return stats
|
|
||||||
|
|
||||||
commitre = re.compile('^commit ([a-z0-9]{40})$', re.MULTILINE)
|
|
||||||
branchre = re.compile('^\\* (.+)$', re.MULTILINE)
|
|
||||||
def get_current_commit():
|
def get_current_commit():
|
||||||
gittext = check_output(['git', 'branch'])
|
gittext = check_output(split('git rev-parse HEAD'))
|
||||||
match = branchre.search(gittext)
|
return gittext.strip() if gittext else None
|
||||||
if match and not ("no branch" in match.group(1)):
|
|
||||||
return match.group(1)
|
|
||||||
gittext = check_output(['git', 'show', 'HEAD'])
|
def get_current_ref():
|
||||||
match = commitre.match(gittext)
|
branch = get_current_branch()
|
||||||
if match == None:
|
if branch:
|
||||||
return None
|
return branch
|
||||||
return match.group(1)
|
|
||||||
|
commit = get_current_commit()
|
||||||
|
if commit:
|
||||||
|
return commit
|
||||||
|
|
||||||
|
|
||||||
def get_commits(gitrange):
|
def get_commits(gitrange):
|
||||||
gittext = check_output(['git', 'log', '--raw', '--reverse', gitrange])
|
gittext = check_output(split('git rev-list --reverse') + [gitrange, ])
|
||||||
for match in commitre.finditer(gittext):
|
return (c for c in gittext.split("\n"))
|
||||||
yield match.group(1)
|
|
||||||
|
|
||||||
def set_commit(commit):
|
def set_commit(commit):
|
||||||
check_call(['git', 'checkout', commit], quiet=True)
|
check_call(split('git checkout') + [commit, ])
|
||||||
|
|
||||||
parser = OptionParser(usage="usage: %prog [options] -- [overviewer options/world]")
|
|
||||||
parser.add_option("-n", "--number", metavar="N",
|
|
||||||
action="store", type="int", dest="number", default=3,
|
|
||||||
help="number of renders per commit [default: 3]")
|
|
||||||
parser.add_option("-c", "--commits", metavar="RANGE",
|
|
||||||
action="append", type="string", dest="commits", default=[],
|
|
||||||
help="the commit (or range of commits) to test [default: current]")
|
|
||||||
parser.add_option("-v", "--verbose",
|
|
||||||
action="store_false", dest="quiet", default=True,
|
|
||||||
help="don't suppress overviewer output")
|
|
||||||
parser.add_option("-k", "--keep-going",
|
|
||||||
action="store_false", dest="fatal_errors", default=True,
|
|
||||||
help="don't stop testing when Overviewer croaks")
|
|
||||||
parser.add_option("-l", "--log", dest="log", default="", metavar="FILE",
|
|
||||||
help="log all test results to a file")
|
|
||||||
|
|
||||||
(options, args) = parser.parse_args()
|
def main(args):
|
||||||
|
commits = []
|
||||||
if len(args) == 0:
|
for commit in args.commits:
|
||||||
parser.print_help()
|
|
||||||
sys.exit(0)
|
|
||||||
|
|
||||||
commits = []
|
|
||||||
for commit in options.commits:
|
|
||||||
if '..' in commit:
|
if '..' in commit:
|
||||||
commits = get_commits(commit)
|
commits = get_commits(commit)
|
||||||
else:
|
else:
|
||||||
commits.append(commit)
|
commits.append(commit)
|
||||||
if not commits:
|
if not commits:
|
||||||
commits = [get_current_commit(),]
|
commits = [get_current_ref(), ]
|
||||||
|
|
||||||
log = None
|
log = None
|
||||||
if options.log != "":
|
if args.log:
|
||||||
log = open(options.log, "w")
|
log = args.log
|
||||||
|
|
||||||
reset_commit = get_current_commit()
|
reset_commit = get_current_ref()
|
||||||
try:
|
try:
|
||||||
for commit in commits:
|
for commit in commits:
|
||||||
print "testing commit", commit
|
print("testing commit", commit)
|
||||||
set_commit(commit)
|
set_commit(commit)
|
||||||
timelist = []
|
timelist = []
|
||||||
print " -- ",
|
print(" -- "),
|
||||||
try:
|
try:
|
||||||
for i in range(options.number):
|
for i in range(args.number):
|
||||||
sys.stdout.write(str(i+1)+" ")
|
sys.stdout.write(str(i + 1) + " ")
|
||||||
sys.stdout.flush()
|
sys.stdout.flush()
|
||||||
timelist.append(clean_render(args, options.quiet))
|
timelist.append(clean_render(args.overviewer_args, verbose=args.verbose))
|
||||||
print "... done"
|
print("... done")
|
||||||
stats = get_stats(timelist)
|
stats = get_stats(timelist)
|
||||||
print stats
|
print(stats)
|
||||||
if log:
|
if log:
|
||||||
log.write("%s %s\n" % (commit, repr(stats)))
|
log.write("%s %s\n" % (commit, repr(stats)))
|
||||||
except CalledProcessError, e:
|
except CalledProcessError as e:
|
||||||
if options.fatal_errors:
|
if args.fatal_errors:
|
||||||
print
|
print(e)
|
||||||
print "Overviewer croaked, exiting..."
|
print("Overviewer croaked, exiting...")
|
||||||
print "(to avoid this, use --keep-going)"
|
print("(to avoid this, use --keep-going)")
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
finally:
|
finally:
|
||||||
set_commit(reset_commit)
|
set_commit(reset_commit)
|
||||||
if log:
|
if log:
|
||||||
log.close()
|
log.close()
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
parser = argparse.ArgumentParser(description=__doc__)
|
||||||
|
parser.add_argument("overviewer_args", metavar="[overviewer options/world]", nargs="+")
|
||||||
|
parser.add_argument("-n", "--option", metavar="N", type=int, action="store",
|
||||||
|
dest="number", default=3, help="number of renders per commit [default: 3]")
|
||||||
|
parser.add_argument("-c", "--commits", metavar="RANGE",
|
||||||
|
action="append", type=str, dest="commits", default=[],
|
||||||
|
help="the commit (or range of commits) to test [default: current]")
|
||||||
|
parser.add_argument("-v", "--verbose", action="store_true", dest="verbose", default=False,
|
||||||
|
help="don't suppress overviewer output")
|
||||||
|
parser.add_argument("-k", "--keep-going",
|
||||||
|
action="store_false", dest="fatal_errors", default=True,
|
||||||
|
help="don't stop testing when Overviewer croaks")
|
||||||
|
parser.add_argument("-l", "--log", dest="log", type=argparse.FileType('w'), metavar="FILE",
|
||||||
|
help="log all test results to a file")
|
||||||
|
|
||||||
|
args = parser.parse_args()
|
||||||
|
main(args)
|
||||||
|
|||||||
@@ -1,114 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
|
|
||||||
'''
|
|
||||||
Validate a region file
|
|
||||||
|
|
||||||
TODO description here'''
|
|
||||||
|
|
||||||
import os
|
|
||||||
import sys
|
|
||||||
|
|
||||||
# incantation to be able to import overviewer_core
|
|
||||||
if not hasattr(sys, "frozen"):
|
|
||||||
sys.path.insert(0, os.path.abspath(os.path.join(os.path.split(__file__)[0], '..')))
|
|
||||||
|
|
||||||
from overviewer_core import nbt
|
|
||||||
|
|
||||||
def check_region(region_filename):
|
|
||||||
chunk_errors = []
|
|
||||||
if not os.path.exists(region_filename):
|
|
||||||
raise Exception('Region file not found: %s' % region_filename)
|
|
||||||
try:
|
|
||||||
region = nbt.load_region(region_filename, 'lower-left')
|
|
||||||
except IOError, e:
|
|
||||||
raise Exception('Error loading region (%s): %s' % (region_filename, e))
|
|
||||||
try:
|
|
||||||
region.get_chunk_info(False)
|
|
||||||
chunks = region.get_chunks()
|
|
||||||
except IOError, e:
|
|
||||||
raise Exception('Error reading region header (%s): %s' % (region_filename, e))
|
|
||||||
except Exception, e:
|
|
||||||
raise Exception('Error reading region (%s): %s' % (region_filename, e))
|
|
||||||
for x,y in chunks:
|
|
||||||
try:
|
|
||||||
check_chunk(region, x, y)
|
|
||||||
except Exception, e:
|
|
||||||
chunk_errors.append(e)
|
|
||||||
return (chunk_errors, len(chunks))
|
|
||||||
|
|
||||||
def check_chunk(region, x, y):
|
|
||||||
try:
|
|
||||||
data = region.load_chunk(x ,y)
|
|
||||||
except Exception, e:
|
|
||||||
raise Exception('Error reading chunk (%i, %i): %s' % (x, y, e))
|
|
||||||
if data is None:
|
|
||||||
raise Exception('Chunk (%i, %i) is unexpectedly empty' % (x, y))
|
|
||||||
else:
|
|
||||||
try:
|
|
||||||
processed_data = data.read_all()
|
|
||||||
except Exception, e:
|
|
||||||
raise Exception('Error reading chunk (%i, %i) data: %s' % (x, y, e))
|
|
||||||
if processed_data == []:
|
|
||||||
raise Exception('Chunk (%i, %i) is an unexpectedly empty set' % (x, y))
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
try:
|
|
||||||
from optparse import OptionParser
|
|
||||||
|
|
||||||
parser = OptionParser(usage='python contrib/%prog [OPTIONS] <path/to/regions|path/to/regions/*.mcr|regionfile1.mcr regionfile2.mcr ...>',
|
|
||||||
description='This script will valide a minecraft region file for errors.')
|
|
||||||
parser.add_option('-v', dest='verbose', action='store_true', help='Print additional information.')
|
|
||||||
opts, args = parser.parse_args()
|
|
||||||
|
|
||||||
region_files = []
|
|
||||||
for path in args:
|
|
||||||
if os.path.isdir(path):
|
|
||||||
for dirpath, dirnames, filenames in os.walk(path, True):
|
|
||||||
for filename in filenames:
|
|
||||||
if filename.startswith('r.') and filename.endswith('.mcr'):
|
|
||||||
if filename not in region_files:
|
|
||||||
region_files.append(os.path.join(dirpath, filename))
|
|
||||||
elif opts.verbose:
|
|
||||||
print('Ignoring non-region file: %s' % os.path.join(dirpath, filename))
|
|
||||||
elif os.path.isfile(path):
|
|
||||||
dirpath,filename = os.path.split(path)
|
|
||||||
if filename.startswith('r.') and filename.endswith('.mcr'):
|
|
||||||
if path not in region_files:
|
|
||||||
region_files.append(path)
|
|
||||||
else:
|
|
||||||
print('Ignoring non-region file: %s' % path)
|
|
||||||
else:
|
|
||||||
if opts.verbose:
|
|
||||||
print('Ignoring arg: %s' % path)
|
|
||||||
if len(region_files) < 1:
|
|
||||||
print 'You must list at least one region file.'
|
|
||||||
parser.print_help()
|
|
||||||
sys.exit(1)
|
|
||||||
else:
|
|
||||||
overall_chunk_total = 0
|
|
||||||
bad_chunk_total = 0
|
|
||||||
bad_region_total = 0
|
|
||||||
for region_file in region_files:
|
|
||||||
try:
|
|
||||||
(chunk_errors, region_chunks) = check_region(region_file)
|
|
||||||
bad_chunk_total += len(chunk_errors)
|
|
||||||
overall_chunk_total += region_chunks
|
|
||||||
except Exception, e:
|
|
||||||
bad_region_total += 1
|
|
||||||
print('FAILED(%s): %s' % (region_file, e))
|
|
||||||
else:
|
|
||||||
if len(chunk_errors) is not 0:
|
|
||||||
print('WARNING(%s) Chunks: %i/%' % (region_file, region_chunks - len(chunk_errors), region_chunks))
|
|
||||||
if opts.verbose:
|
|
||||||
for error in chunk_errors:
|
|
||||||
print(error)
|
|
||||||
elif opts.verbose:
|
|
||||||
print ('PASSED(%s) Chunks: %i/%i' % (region_file, region_chunks - len(chunk_errors), region_chunks))
|
|
||||||
if opts.verbose:
|
|
||||||
print 'REGIONS: %i/%i' % (len(region_files) - bad_region_total, len(region_files))
|
|
||||||
print 'CHUNKS: %i/%i' % (overall_chunk_total - bad_chunk_total, overall_chunk_total)
|
|
||||||
except KeyboardInterrupt:
|
|
||||||
sys.exit(1)
|
|
||||||
except Exception, e:
|
|
||||||
print('ERROR: %s' % e)
|
|
||||||
|
|
||||||
@@ -10,9 +10,7 @@ import sys
|
|||||||
scripts = { # keys are names, values are scripts
|
scripts = { # keys are names, values are scripts
|
||||||
"convertCyrillic": "cyrillic_convert.py",
|
"convertCyrillic": "cyrillic_convert.py",
|
||||||
"playerInspect": "playerInspect.py",
|
"playerInspect": "playerInspect.py",
|
||||||
"rerenderBlocks": "rerenderBlocks.py",
|
|
||||||
"testRender": "testRender.py",
|
"testRender": "testRender.py",
|
||||||
"validate": "validateRegionFile.py",
|
|
||||||
"pngit": "png-it.py",
|
"pngit": "png-it.py",
|
||||||
"gallery": "gallery.py",
|
"gallery": "gallery.py",
|
||||||
"regionTrimmer": "regionTrimmer.py",
|
"regionTrimmer": "regionTrimmer.py",
|
||||||
@@ -21,9 +19,9 @@ scripts = { # keys are names, values are scripts
|
|||||||
|
|
||||||
# you can symlink or hardlink contribManager.py to another name to have it
|
# you can symlink or hardlink contribManager.py to another name to have it
|
||||||
# automatically find the right script to run. For example:
|
# automatically find the right script to run. For example:
|
||||||
# > ln -s contribManager.py validate.exe
|
# > ln -s contribManager.py pngit.exe
|
||||||
# > chmod +x validate.exe
|
# > chmod +x pngit.exe
|
||||||
# > ./validate.exe -h
|
# > ./pngit.exe -h
|
||||||
|
|
||||||
|
|
||||||
# figure out what script to execute
|
# figure out what script to execute
|
||||||
|
|||||||
@@ -2,7 +2,10 @@
|
|||||||
import unittest
|
import unittest
|
||||||
|
|
||||||
# For convenience
|
# For convenience
|
||||||
import sys,os,logging
|
import sys
|
||||||
|
import os
|
||||||
|
import logging
|
||||||
|
|
||||||
sys.path.insert(0, os.getcwd())
|
sys.path.insert(0, os.getcwd())
|
||||||
sys.path.insert(0, os.path.join(os.getcwd(), os.pardir))
|
sys.path.insert(0, os.path.join(os.getcwd(), os.pardir))
|
||||||
|
|
||||||
@@ -12,15 +15,23 @@ from test_rendertileset import RendertileSetTest
|
|||||||
from test_settings import SettingsTest
|
from test_settings import SettingsTest
|
||||||
from test_tileset import TilesetTest
|
from test_tileset import TilesetTest
|
||||||
from test_cache import TestLRU
|
from test_cache import TestLRU
|
||||||
|
from test_contributors import TestContributors
|
||||||
|
from test_cyrillic_convert import TestCyrillicConvert
|
||||||
|
from test_playerInspect import TestPlayerInspect
|
||||||
|
from test_regionTrimmer import TestRegionTrimmer
|
||||||
|
from test_testRender import TestTestRender
|
||||||
|
|
||||||
# DISABLE THIS BLOCK TO GET LOG OUTPUT FROM TILESET FOR DEBUGGING
|
# DISABLE THIS BLOCK TO GET LOG OUTPUT FROM TILESET FOR DEBUGGING
|
||||||
if 0:
|
if 0:
|
||||||
root = logging.getLogger()
|
root = logging.getLogger()
|
||||||
|
|
||||||
class NullHandler(logging.Handler):
|
class NullHandler(logging.Handler):
|
||||||
def handle(self, record):
|
def handle(self, record):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
def emit(self, record):
|
def emit(self, record):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
def createLock(self):
|
def createLock(self):
|
||||||
self.lock = None
|
self.lock = None
|
||||||
root.addHandler(NullHandler())
|
root.addHandler(NullHandler())
|
||||||
|
|||||||
160
test/test_contributors.py
Normal file
160
test/test_contributors.py
Normal file
@@ -0,0 +1,160 @@
|
|||||||
|
import unittest
|
||||||
|
from io import StringIO, BytesIO
|
||||||
|
from textwrap import dedent
|
||||||
|
from unittest.mock import patch
|
||||||
|
|
||||||
|
import contrib.contributors as contrib
|
||||||
|
|
||||||
|
|
||||||
|
class TestContributors(unittest.TestCase):
|
||||||
|
def setUp(self):
|
||||||
|
self.contrib_file_lines = dedent("""\
|
||||||
|
============
|
||||||
|
Contributors
|
||||||
|
============
|
||||||
|
|
||||||
|
This file contains a list of every person who has contributed code to
|
||||||
|
Overviewer.
|
||||||
|
|
||||||
|
---------------
|
||||||
|
Original Author
|
||||||
|
---------------
|
||||||
|
|
||||||
|
* Andrew Brown <brownan@gmail.com>
|
||||||
|
|
||||||
|
-------------------------
|
||||||
|
Long-term Contributions
|
||||||
|
-------------------------
|
||||||
|
|
||||||
|
These contributors have made many changes, over a fairly long time span, or
|
||||||
|
for many different parts of the code.
|
||||||
|
|
||||||
|
* Alejandro Aguilera <fenixin@lavabit.com>
|
||||||
|
|
||||||
|
------------------------
|
||||||
|
Short-term Contributions
|
||||||
|
------------------------
|
||||||
|
|
||||||
|
These contributors have made specific changes for a particular bug fix or
|
||||||
|
feature.
|
||||||
|
|
||||||
|
* 3decibels <3db@3decibels.net>""").split("\n")
|
||||||
|
|
||||||
|
def test_format_contributor_single_name(self):
|
||||||
|
contributor = {"name": "John", "email": "<john@gmail.com>"}
|
||||||
|
self.assertEqual(
|
||||||
|
contrib.format_contributor(contributor),
|
||||||
|
" * John <john@gmail.com>"
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_format_contributor_multiple_names(self):
|
||||||
|
contributor = {"name": "John Smith", "email": "<john@gmail.com>"}
|
||||||
|
self.assertEqual(
|
||||||
|
contrib.format_contributor(contributor),
|
||||||
|
" * John Smith <john@gmail.com>"
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_get_old_contributors(self):
|
||||||
|
expected = [{"name": "Andrew Brown", "email": "<brownan@gmail.com>"},
|
||||||
|
{"name": "Alejandro Aguilera", "email": "<fenixin@lavabit.com>"},
|
||||||
|
{"name": "3decibels", "email": "<3db@3decibels.net>"}]
|
||||||
|
|
||||||
|
self.assertListEqual(contrib.get_old_contributors(self.contrib_file_lines), expected)
|
||||||
|
|
||||||
|
@patch('subprocess.run')
|
||||||
|
def test_get_contributors(self, mock_run):
|
||||||
|
mock_run.return_value.stdout = dedent("""\
|
||||||
|
1 3decibels <3db@3decibels.net>
|
||||||
|
585 Aaron Griffith <aargri@gmail.com>
|
||||||
|
1 Aaron1011 <aa1ronham@gmail.com>
|
||||||
|
""").encode()
|
||||||
|
expected = [{"count": 1, "name": "3decibels", "email": "<3db@3decibels.net>"},
|
||||||
|
{"count": 585, "name": "Aaron Griffith", "email": "<aargri@gmail.com>"},
|
||||||
|
{"count": 1, "name": "Aaron1011", "email": "<aa1ronham@gmail.com>"}]
|
||||||
|
self.assertListEqual(contrib.get_contributors(), expected)
|
||||||
|
|
||||||
|
def test_get_new_contributors_new_contributors_alphabetical_order(self):
|
||||||
|
contributors = [{"count": 1, "name": "3decibels", "email": "<3db@3decibels.net>"},
|
||||||
|
{"count": 585, "name": "Aaron Griffith", "email": "<aargri@gmail.com>"},
|
||||||
|
{"count": 1, "name": "Aaron1011", "email": "<aa1ronham@gmail.com>"}]
|
||||||
|
|
||||||
|
old_contributors = [{"name": "Andrew Brown", "email": "<brownan@gmail.com>"},
|
||||||
|
{"name": "Alejandro Aguilera", "email": "<fenixin@lavabit.com>"},
|
||||||
|
{"name": "3decibels", "email": "<3db@3decibels.net>"}]
|
||||||
|
|
||||||
|
new_contributors, new_alias, new_email = contrib.get_new_contributors(
|
||||||
|
contributors, old_contributors)
|
||||||
|
|
||||||
|
self.assertListEqual(new_contributors, [{"count": 1, "name": "Aaron1011", "email": "<aa1ronham@gmail.com>"}, {
|
||||||
|
"count": 585, "name": "Aaron Griffith", "email": "<aargri@gmail.com>"}])
|
||||||
|
|
||||||
|
def test_get_new_contributors_new_alias(self):
|
||||||
|
contributors = [{"count": 1, "name": "new_name", "email": "<3db@3decibels.net>"},
|
||||||
|
{"count": 585, "name": "Aaron Griffith", "email": "<aargri@gmail.com>"},
|
||||||
|
{"count": 1, "name": "Aaron1011", "email": "<aa1ronham@gmail.com>"}]
|
||||||
|
|
||||||
|
old_contributors = [{"name": "Andrew Brown", "email": "<brownan@gmail.com>"},
|
||||||
|
{"name": "Alejandro Aguilera", "email": "<fenixin@lavabit.com>"},
|
||||||
|
{"name": "3decibels", "email": "<3db@3decibels.net>"}]
|
||||||
|
|
||||||
|
new_contributors, new_alias, new_email = contrib.get_new_contributors(
|
||||||
|
contributors, old_contributors)
|
||||||
|
self.assertListEqual(
|
||||||
|
new_alias, [({"count": 1, "name": "new_name", "email": "<3db@3decibels.net>"}, "3decibels")])
|
||||||
|
|
||||||
|
def test_get_new_contributors_new_email(self):
|
||||||
|
contributors = [{"count": 1, "name": "3decibels", "email": "<3db@3decibels.com>"},
|
||||||
|
{"count": 585, "name": "Aaron Griffith", "email": "<aargri@gmail.com>"},
|
||||||
|
{"count": 1, "name": "Aaron1011", "email": "<aa1ronham@gmail.com>"}]
|
||||||
|
|
||||||
|
old_contributors = [{"name": "Andrew Brown", "email": "<brownan@gmail.com>"},
|
||||||
|
{"name": "Alejandro Aguilera", "email": "<fenixin@lavabit.com>"},
|
||||||
|
{"name": "3decibels", "email": "<3db@3decibels.net>"}]
|
||||||
|
|
||||||
|
new_contributors, new_alias, new_email = contrib.get_new_contributors(
|
||||||
|
contributors, old_contributors)
|
||||||
|
self.assertListEqual(
|
||||||
|
new_email, [({"count": 1, "name": "3decibels", "email": "<3db@3decibels.com>"}, "<3db@3decibels.net>")])
|
||||||
|
|
||||||
|
def test_merge_short_term_contributors(self):
|
||||||
|
new_contributors = [{"count": 1, "name": "Aaron1011", "email": "<aa1ronham@gmail.com>"}, {
|
||||||
|
"count": 585, "name": "Aaron Griffith", "email": "<aargri@gmail.com>"}]
|
||||||
|
expected = ['============',
|
||||||
|
'Contributors',
|
||||||
|
'============',
|
||||||
|
'',
|
||||||
|
'This file contains a list of every person who has contributed code to',
|
||||||
|
'Overviewer.',
|
||||||
|
'',
|
||||||
|
'---------------',
|
||||||
|
'Original Author',
|
||||||
|
'---------------',
|
||||||
|
'',
|
||||||
|
' * Andrew Brown <brownan@gmail.com>',
|
||||||
|
'',
|
||||||
|
'-------------------------',
|
||||||
|
'Long-term Contributions',
|
||||||
|
'-------------------------',
|
||||||
|
'',
|
||||||
|
'These contributors have made many changes, over a fairly long time span, or',
|
||||||
|
'for many different parts of the code.',
|
||||||
|
'',
|
||||||
|
' * Alejandro Aguilera <fenixin@lavabit.com>',
|
||||||
|
'',
|
||||||
|
'------------------------',
|
||||||
|
'Short-term Contributions',
|
||||||
|
'------------------------',
|
||||||
|
'',
|
||||||
|
'These contributors have made specific changes for a particular bug fix or',
|
||||||
|
'feature.',
|
||||||
|
'',
|
||||||
|
' * 3decibels <3db@3decibels.net>',
|
||||||
|
' * Aaron1011 <aa1ronham@gmail.com>\n',
|
||||||
|
' * Aaron Griffith <aargri@gmail.com>\n']
|
||||||
|
|
||||||
|
self.assertListEqual(contrib.merge_short_term_contributors(
|
||||||
|
self.contrib_file_lines, new_contributors), expected)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
unittest.main()
|
||||||
11
test/test_cyrillic_convert.py
Normal file
11
test/test_cyrillic_convert.py
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
import unittest
|
||||||
|
import tempfile
|
||||||
|
|
||||||
|
from contrib.cyrillic_convert import convert
|
||||||
|
|
||||||
|
|
||||||
|
class TestCyrillicConvert(unittest.TestCase):
|
||||||
|
def test_convert(self):
|
||||||
|
gibberish = '{chunk: [-2, 0],y: 65,msg: "ðåëèãèè",x: -20,z: 4,type: "sign"}'
|
||||||
|
cyrillic = '{chunk: [-2, 0],y: 65,msg: "религии",x: -20,z: 4,type: "sign"}'
|
||||||
|
self.assertEqual(convert(gibberish), cyrillic)
|
||||||
176
test/test_playerInspect.py
Normal file
176
test/test_playerInspect.py
Normal file
@@ -0,0 +1,176 @@
|
|||||||
|
import unittest
|
||||||
|
from io import StringIO
|
||||||
|
from pathlib import Path
|
||||||
|
from textwrap import dedent
|
||||||
|
from unittest.mock import patch, MagicMock
|
||||||
|
|
||||||
|
import contrib.playerInspect as player_inspect
|
||||||
|
|
||||||
|
|
||||||
|
class TestPlayerInspect(unittest.TestCase):
    """Tests for contrib.playerInspect (player .dat file inspection helpers)."""

    def setUp(self):
        # Representative decoded player NBT data used as the shared fixture.
        # DataVersion 1631 corresponds to a Minecraft 1.13-era save format —
        # TODO confirm against the versions playerInspect supports.
        self.player_data = {
            'AbsorptionAmount': 0.0,
            'Air': 300,
            'Attributes': [
                {'Base': 20.0, 'Name': 'generic.maxHealth'},
                {'Base': 0.0, 'Name': 'generic.knockbackResistance'},
                {'Base': 0.10000000149011612, 'Name': 'generic.movementSpeed'},
                {'Base': 0.0, 'Name': 'generic.armor'},
                {'Base': 0.0, 'Name': 'generic.armorToughness'},
                {'Base': 1.0, 'Name': 'generic.attackDamage'},
                {'Base': 4.0, 'Name': 'generic.attackSpeed'},
                {'Base': 0.0, 'Name': 'generic.luck'}
            ],
            'DataVersion': 1631,
            'DeathTime': 0,
            'Dimension': 0,
            'EnderItems': [],
            'FallDistance': 0.0,
            'FallFlying': 0,
            'Fire': -20,
            'Health': 20.0,
            'HurtByTimestamp': 0,
            'HurtTime': 0,
            # Single-item inventory: the print tests assert "Inventory: 1 items".
            'Inventory': [{'Count': 1, 'Slot': -106, 'id': 'minecraft:sign'}],
            'Invulnerable': 0,
            'Motion': [0.0, -0.0784000015258789, 0.0],
            'OnGround': 1,
            'PortalCooldown': 0,
            # Floating-point position; the printed output shows truncated ints.
            'Pos': [-96.11859857363737, 70.0, -44.17768261916891],
            'Rotation': [-72.00011444091797, 38.250030517578125],
            'Score': 0,
            'SelectedItemSlot': 0,
            'SleepTimer': 0,
            'Sleeping': 0,
            # Spawn coordinates are optional in player data; the *_no_spawn
            # tests below strip these three keys from the fixture.
            "SpawnX": 10,
            "SpawnY": 52,
            "SpawnZ": 10,
            'UUIDLeast': -7312926203658200544,
            'UUIDMost': 6651100054519957107,
            'XpLevel': 0,
            'XpP': 0.0,
            'XpSeed': 0,
            'XpTotal': 0,
            'abilities': {
                'flySpeed': 0.05000000074505806,
                'flying': 0,
                'instabuild': 1,
                'invulnerable': 1,
                'mayBuild': 1,
                'mayfly': 1,
                'walkSpeed': 0.10000000149011612
            },
            'foodExhaustionLevel': 0.0,
            'foodLevel': 20,
            'foodSaturationLevel': 5.0,
            'foodTickTimer': 0,
            'playerGameType': 1,
            'recipeBook': {
                'isFilteringCraftable': 0,
                'isFurnaceFilteringCraftable': 0,
                'isFurnaceGuiOpen': 0,
                'isGuiOpen': 0,
                'recipes': [],
                'toBeDisplayed': []
            },
            'seenCredits': 0
        }

    @patch('sys.stdout', new_callable=StringIO)
    def test_print_player(self, mock_stdout):
        """Full summary includes position, spawn, stats and inventory contents."""
        expected = "\n".join([
            "Position:\t-96, 70, -44\t(dim: 0)",
            "Spawn:\t\t10, 52, 10",
            "Health:\t20\tLevel:\t\t0\t\tGameType:\t1",
            "Food:\t20\tTotal XP:\t0",
            "Inventory: 1 items",
            " 1 minecraft:sign\n"])

        player_inspect.print_player(self.player_data)

        self.assertEqual(mock_stdout.getvalue(), expected)

    @patch('sys.stdout', new_callable=StringIO)
    def test_print_player_no_spawn(self, mock_stdout):
        """The Spawn line is omitted when SpawnX/Y/Z are absent from the data."""
        expected = "\n".join([
            "Position:\t-96, 70, -44\t(dim: 0)",
            "Health:\t20\tLevel:\t\t0\t\tGameType:\t1",
            "Food:\t20\tTotal XP:\t0",
            "Inventory: 1 items",
            " 1 minecraft:sign\n"])

        # Copy the fixture minus the spawn keys rather than mutating it.
        player_data = {
            k: v for k, v in self.player_data.items()
            if k not in ("SpawnX", "SpawnY", "SpawnZ")
        }
        player_inspect.print_player(player_data)

        self.assertEqual(mock_stdout.getvalue(), expected)

    @patch('sys.stdout', new_callable=StringIO)
    def test_print_player_sub_entry(self, mock_stdout):
        """sub_entry=True indents each line and skips the inventory item list."""
        expected = "\n".join([
            "\tPosition:\t-96, 70, -44\t(dim: 0)",
            "\tSpawn:\t\t10, 52, 10",
            "\tHealth:\t20\tLevel:\t\t0\t\tGameType:\t1",
            "\tFood:\t20\tTotal XP:\t0",
            "\tInventory: 1 items\n"])

        player_inspect.print_player(self.player_data, sub_entry=True)

        self.assertEqual(mock_stdout.getvalue(), expected)

    @patch('sys.stdout', new_callable=StringIO)
    def test_print_player_sub_entry_no_spawn(self, mock_stdout):
        """Indented summary also drops the Spawn line when spawn keys are missing."""
        expected = "\n".join([
            "\tPosition:\t-96, 70, -44\t(dim: 0)",
            "\tHealth:\t20\tLevel:\t\t0\t\tGameType:\t1",
            "\tFood:\t20\tTotal XP:\t0",
            "\tInventory: 1 items\n"])

        player_data = {
            k: v for k, v in self.player_data.items()
            if k not in ("SpawnX", "SpawnY", "SpawnZ")
        }
        player_inspect.print_player(player_data, sub_entry=True)

        self.assertEqual(mock_stdout.getvalue(), expected)

    def test_find_all_player_files(self):
        """Every player .dat file yields a (path, uuid-stem) pair, in order."""
        dir_path = MagicMock(Path)
        files = [Path('def0492d-0fe9-43ff-a3d5-8c3fc9160c94.dat'),
                 Path('074c808a-1f04-4bdd-8385-bd74601210a1.dat'),
                 Path('104e149d-a802-4a27-ac8f-ceab5279087c.dat')]
        # iterdir is mocked with a generator to mimic Path's lazy iteration.
        dir_path.iterdir.return_value = (f for f in files)

        expected = [(Path('def0492d-0fe9-43ff-a3d5-8c3fc9160c94.dat'),
                     'def0492d-0fe9-43ff-a3d5-8c3fc9160c94'),
                    (Path('074c808a-1f04-4bdd-8385-bd74601210a1.dat'),
                     '074c808a-1f04-4bdd-8385-bd74601210a1'),
                    (Path('104e149d-a802-4a27-ac8f-ceab5279087c.dat'),
                     '104e149d-a802-4a27-ac8f-ceab5279087c')]
        result = player_inspect.find_all_player_files(dir_path)
        self.assertListEqual(list(result), expected)

    def test_find_player_file(self):
        """Selecting a player by UUID returns just that player's (path, uuid)."""
        dir_path = MagicMock(Path)
        files = [Path('def0492d-0fe9-43ff-a3d5-8c3fc9160c94.dat'),
                 Path('074c808a-1f04-4bdd-8385-bd74601210a1.dat'),
                 Path('104e149d-a802-4a27-ac8f-ceab5279087c.dat')]
        dir_path.iterdir.return_value = (f for f in files)

        expected = (Path('104e149d-a802-4a27-ac8f-ceab5279087c.dat'),
                    '104e149d-a802-4a27-ac8f-ceab5279087c')
        result = player_inspect.find_player_file(
            dir_path, selected_player='104e149d-a802-4a27-ac8f-ceab5279087c')
        self.assertEqual(result, expected)

    def test_find_player_file_raises_when_selected_player_not_found(self):
        """An unknown UUID raises FileNotFoundError rather than returning None."""
        dir_path = MagicMock(Path)
        files = [Path('def0492d-0fe9-43ff-a3d5-8c3fc9160c94.dat'),
                 Path('104e149d-a802-4a27-ac8f-ceab5279087c.dat')]
        dir_path.iterdir.return_value = (f for f in files)

        with self.assertRaises(FileNotFoundError):
            player_inspect.find_player_file(dir_path, selected_player='NON_EXISTENT_UUID')
|
||||||
68
test/test_regionTrimmer.py
Normal file
68
test/test_regionTrimmer.py
Normal file
@@ -0,0 +1,68 @@
|
|||||||
|
import unittest
|
||||||
|
from pathlib import Path
|
||||||
|
from tempfile import TemporaryDirectory
|
||||||
|
|
||||||
|
import networkx
|
||||||
|
|
||||||
|
import contrib.regionTrimmer as region_trimmer
|
||||||
|
|
||||||
|
class TestRegionTrimmer(unittest.TestCase):
    """Unit tests for the contrib.regionTrimmer graph helpers."""

    def test_get_nodes(self):
        """Region filenames of the form r.<x>.<z>.mca become (x, z) nodes."""
        coordinates = [(0, 0), (0, -1), (-1, 0), (-1, -1)]
        with TemporaryDirectory() as tmp:
            region_dir = Path(tmp)
            for x, z in coordinates:
                (region_dir / "r.{x}.{z}.mca".format(x=x, z=z)).touch()

            found = region_trimmer.get_nodes(region_dir)
            self.assertListEqual(sorted(found), sorted(coordinates))

    def test_get_nodes_returns_empty_list_when_no_region_files(self):
        """Files that are not region files are ignored entirely."""
        with TemporaryDirectory() as tmp:
            region_dir = Path(tmp)
            (region_dir / "not_region_file.txt").touch()
            found = region_trimmer.get_nodes(region_dir)
            self.assertListEqual(found, [])

    def test_get_region_file_from_node(self):
        """A node (x, z) maps back to <regionset>/r.<x>.<z>.mca."""
        result = region_trimmer.get_region_file_from_node(
            Path('/path/to/regions'), (0, 0))
        self.assertEqual(result, Path('/path/to/regions/r.0.0.mca'))

    def test_get_graph_bounds(self):
        """Bounds are reported as (max_x, min_x, max_z, min_z) over all nodes."""
        grid = networkx.Graph()
        grid.add_nodes_from([(0, 0), (0, -1), (-1, 0), (-1, -1)])

        self.assertEqual(region_trimmer.get_graph_bounds(grid), (0, -1, 0, -1))

    def test_get_graph_center_by_bounds(self):
        """The center of a (0, -1, 0, -1) bounding box is (-1, -1)."""
        bounds = (0, -1, 0, -1)
        center = region_trimmer.get_graph_center_by_bounds(bounds)
        self.assertEqual(center, (-1, -1))

    def test_generate_edges(self):
        """Neighbouring region nodes (including diagonals) get connected."""
        grid = networkx.Graph()
        grid.add_nodes_from([(0, 0), (0, -1), (-1, 0), (-1, -1)])

        connected = region_trimmer.generate_edges(grid)

        expected_adjacency = {
            (0, -1): {(0, 0): {}, (-1, -1): {}},
            (0, 0): {
                (0, -1): {},
                (-1, 0): {},
                (-1, -1): {},
            },
            (-1, 0): {(0, 0): {}, (-1, -1): {}},
            (-1, -1): {
                (0, -1): {},
                (0, 0): {},
                (-1, 0): {},
            },
        }
        self.assertEqual(connected.adj, expected_adjacency)
|
||||||
108
test/test_testRender.py
Normal file
108
test/test_testRender.py
Normal file
@@ -0,0 +1,108 @@
|
|||||||
|
import tempfile
|
||||||
|
import unittest
|
||||||
|
from unittest.mock import patch
|
||||||
|
from subprocess import CalledProcessError, PIPE, STDOUT
|
||||||
|
import contrib.testRender as test_render
|
||||||
|
from io import StringIO
|
||||||
|
from shlex import split
|
||||||
|
|
||||||
|
|
||||||
|
class TestTestRender(unittest.TestCase):
    """Unit tests for contrib.testRender's subprocess and git helpers."""

    @patch("contrib.testRender.run")
    def test_check_call_raises_CalledProcessError_from_subprocess_run(self, m_run):
        """check_call propagates CalledProcessError from subprocess.run."""
        m_run.side_effect = CalledProcessError(1, "python program.js")
        with self.assertRaises(CalledProcessError):
            test_render.check_call(["python", "program.js"])

    @patch("contrib.testRender.run")
    def test_check_call_captures_stdout_if_not_verbose(self, m_run):
        """By default stderr is merged into a captured stdout pipe."""
        test_render.check_call(["python", "program.py"])
        args, kwargs = m_run.call_args
        self.assertEqual(kwargs['stdout'], PIPE)
        self.assertEqual(kwargs['stderr'], STDOUT)

    @patch("contrib.testRender.run")
    def test_check_call_does_not_capture_stdout_if_verbose(self, m_run):
        """verbose=True leaves stdout/stderr inherited from the parent."""
        test_render.check_call(["python", "program.py"], verbose=True)
        args, kwargs = m_run.call_args
        self.assertEqual(kwargs['stdout'], None)
        self.assertEqual(kwargs['stderr'], None)

    @patch('sys.stdout', new_callable=StringIO)
    @patch("contrib.testRender.run")
    def test_check_call_prints_exception_output_if_verbose(self, m_run, m_out):
        """In verbose mode the failing command's output is echoed to stdout."""
        m_run.side_effect = CalledProcessError(
            1, "python program.js", output="SyntaxError: invalid syntax")
        with self.assertRaises(CalledProcessError):
            test_render.check_call(["python", "program.js"], verbose=True)
        self.assertEqual(m_out.getvalue().strip(), "SyntaxError: invalid syntax")

    @patch("contrib.testRender.run")
    def test_check_output_captures_stdout(self, m_run):
        """check_output always captures stdout via a pipe.

        BUG FIX: this test previously called check_call, duplicating
        test_check_call_captures_stdout_if_not_verbose instead of exercising
        check_output as its name promises.
        """
        test_render.check_output(["python", "program.py"])
        args, kwargs = m_run.call_args
        self.assertEqual(kwargs['stdout'], PIPE)

    @patch('contrib.testRender.check_output')
    def test_get_commits_parses_full_log_output(self, m_check_output):
        """get_commits extracts the commit hashes from full `git log` output.

        BUG FIX: this test was previously also named ``test_get_commits``;
        the later method of the same name shadowed it, so it never ran.
        """
        gitrange = '2eca1a5fb5fa7eeb5494abb350cd535f67acfb8b..08a86a52abfabd59ac68b37dc7e5270bd7fb328a'
        m_check_output.return_value = (
            "commit 2eca1a5fb5fa7eeb5494abb350cd535f67acfb8b\nAuthor: Andrew "
            "<andrew@fry.(none)>\nDate: Sun Aug 22 10:16:10 2010 -0400\n\n "
            " initial comit\n\n:000000 100644 0000000 c398ada A\tchunk.py\n:000000 "
            "100644 0000000 d5ee6ed A\tnbt.py\n:000000 100644 0000000 8fc65c9 A\ttextures.py\n:"
            "000000 100644 0000000 6934326 A\tworld.py\n\ncommit 08a86a52abfabd59ac68b37dc7e5270bd7fb328a"
            "\nAuthor: Andrew <andrew@fry.(none)>\nDate: Tue Aug 24 21:11:57 2010 -0400\n\n "
            "uses multiprocessing to speed up rendering. Caches chunks\n\n:1"
        )

        result = list(test_render.get_commits(gitrange))
        self.assertListEqual(result, ['2eca1a5fb5fa7eeb5494abb350cd535f67acfb8b',
                                      '08a86a52abfabd59ac68b37dc7e5270bd7fb328a'])

    @patch('contrib.testRender.check_output', return_value="my-feature-branch")
    def test_get_current_branch(self, m_check_output):
        self.assertEqual(test_render.get_current_branch(), "my-feature-branch")

    @patch('contrib.testRender.check_output', return_value="HEAD")
    def test_get_current_branch_returns_none_for_detached_head(self, m_check_output):
        """A detached HEAD has no branch name, so None is returned."""
        self.assertIsNone(test_render.get_current_branch())

    @patch('contrib.testRender.check_output', return_value="3f1f3d748e1c79843279ba18ab65a34368b95b67")
    def test_get_current_commit(self, m_check_output):
        """get_current_commit returns the hash reported by git.

        BUG FIX: this test previously asserted against get_current_branch(),
        which happened to pass (the mocked output is not "HEAD") but left
        get_current_commit itself untested.
        """
        self.assertEqual(
            test_render.get_current_commit(),
            "3f1f3d748e1c79843279ba18ab65a34368b95b67"
        )

    @patch('contrib.testRender.get_current_branch', return_value="my-feature-branch")
    def test_get_current_ref_returns_branch_name_if_possible(self, m_branch):
        self.assertEqual(test_render.get_current_ref(), "my-feature-branch")

    @patch('contrib.testRender.get_current_commit', return_value="3f1f3d748e1c79843279ba18ab65a34368b95b67")
    @patch('contrib.testRender.get_current_branch', return_value=None)
    def test_get_current_ref_returns_current_commit_if_no_branch(self, m_branch, m_commit):
        """With no branch (detached HEAD), the ref falls back to the commit hash."""
        self.assertEqual(
            test_render.get_current_ref(),
            "3f1f3d748e1c79843279ba18ab65a34368b95b67"
        )

    @patch('contrib.testRender.check_output')
    def test_get_commits(self, m_check_output):
        """get_commits yields one hash per line of plain rev-list-style output."""
        m_check_output.return_value = "\n".join(
            [
                "41ceaeab58473416bb79680ab21211764e6f1908",
                "a4d0daa91c25a51ca95182301e503c020900dafe",
                "05906c81f5778a543dfab14e77231db0a99bae24",
            ]
        )
        gitrange = "41ceaeab58473416bb79680ab21211764e6f1908..05906c81f5778a543dfab14e77231db0a99bae24"
        result = list(test_render.get_commits(gitrange))
        self.assertListEqual(
            result,
            [
                "41ceaeab58473416bb79680ab21211764e6f1908",
                "a4d0daa91c25a51ca95182301e503c020900dafe",
                "05906c81f5778a543dfab14e77231db0a99bae24"
            ]
        )
|
||||||
Reference in New Issue
Block a user