
Merge pull request #1545 from SteadBytes/contrib-python3

Move Contrib Scripts to Python3
Nicolas F
2019-04-07 14:27:59 +02:00
committed by GitHub
16 changed files with 1078 additions and 623 deletions
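Most of the per-file changes below reduce to a handful of recurring Python 2 → 3 patterns. A minimal runnable sketch of the new idioms this PR standardizes on (illustrative names only, and assuming git is on PATH for the subprocess line):

    import subprocess
    from pathlib import Path

    d = {"a": 1, "b": 2}
    for k, v in d.items():                    # dict.iteritems() -> dict.items()
        print(k, v)                           # print statement -> print() function

    center = 5 // 2                           # '/' -> '//' where integer division is intended

    p = subprocess.run(["git", "--version"],  # Popen()/wait() -> subprocess.run()
                       stdout=subprocess.PIPE, universal_newlines=True)
    print(p.stdout.strip())

    region = Path("region") / "r.0.0.mca"     # os.path.join() -> pathlib.Path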

.travis.yml

@@ -1,7 +1,6 @@
language: python
dist: xenial
python:
- "3.4"
- "3.5"
- "3.6"
- "3.7"
@@ -14,6 +13,7 @@ before_install:
install:
- pip install -q pillow
- pip install -q numpy
- pip install -q networkx
- python3 setup.py build
before_script:
- git clone git://github.com/overviewer/Minecraft-Overviewer-Addons.git ~/mcoa/

contrib/contributors.py Executable file → Normal file

@@ -1,4 +1,4 @@
#!/usr/bin/python2
#!/usr/bin/env python3
"""Update the contributor list
Alias handling is done by git with .mailmap
@@ -6,110 +6,133 @@ New contributors are merged in the short-term list.
Moving them to a "higher" list should be a manual process.
"""
import fileinput
from subprocess import Popen, PIPE
import re
from pathlib import Path
import subprocess
CONTRIB_FILE_CONTRIBUTOR_RE = re.compile(r'\* (.+) (<.+>)')
def format_contributor(contributor):
return " * {0} {1}".format(
" ".join(contributor["name"]),
contributor["email"])
return " * {0} {1}".format(contributor["name"], contributor["email"])
def main():
# generate list of contributors
def get_contributors():
""" Parse all contributors from output of git shortlog -se
"""
contributors = []
p_git = Popen(["git", "shortlog", "-se"], stdout=PIPE)
for line in p_git.stdout:
contributors.append({
'count': int(line.split("\t")[0].strip()),
'name': line.split("\t")[1].split()[0:-1],
'email': line.split("\t")[1].split()[-1]
p_git = subprocess.run(["git", "shortlog", "-se"], stdout=subprocess.PIPE)
for line in p_git.stdout.decode('utf-8').split('\n'):
m = re.search(r"(\d+)\t(.+) (<.+>)", line)
if m:
contributors.append({
"count": int(m.group(1)),
"name": m.group(2),
"email": m.group(3)
})
return contributors
# cache listed contributors
def get_old_contributors(contrib_file_lines):
""" Parse existing contributors from CONTRIBUTORS.rst
Returns:
(list) Contributors as {"name", "email"} dicts
"""
old_contributors = []
with open("CONTRIBUTORS.rst", "r") as contrib_file:
for line in contrib_file:
if "@" in line:
old_contributors.append({
'name': line.split()[1:-1],
'email': line.split()[-1]
})
for line in contrib_file_lines:
m = CONTRIB_FILE_CONTRIBUTOR_RE.search(line)
if m:
old_contributors.append({"name": m.group(1), "email": m.group(2)})
return old_contributors
old = map(lambda x: (x['name'], x['email']), old_contributors)
old_emails = map(lambda x: x['email'], old_contributors)
old_names = map(lambda x: x['name'], old_contributors)
# check which contributors are new
def get_new_contributors(contributors, old_contributors):
""" Find new contributors and any possible alias or email changes
Returns:
(tuple) list of new contributors,
list of new aliases as (contributor, existing_name),
list of new emails as (contributor, existing_email)
"""
old_email_names = {c['email']: c['name'] for c in old_contributors}
old_name_emails = {c['name']: c['email'] for c in old_contributors}
new_contributors = []
update_mailmap = False
new_alias = []
new_email = []
for contributor in contributors:
if (contributor['name'], contributor['email']) in old:
# this exact combination already in the list
name, email = contributor['name'], contributor['email']
existing_name, existing_email = old_email_names.get(email), old_name_emails.get(name)
if existing_name == name and existing_email == email:
# exact combination already in list
pass
elif (contributor['email'] not in old_emails
and contributor['name'] not in old_names):
# name AND email are not in the list
elif existing_name is None and existing_email is None:
new_contributors.append(contributor)
elif contributor['email'] in old_emails:
# email is listed, but with another name
old_name = filter(lambda x: x['email'] == contributor['email'],
old_contributors)[0]['name']
print "new alias %s for %s %s ?" % (
" ".join(contributor['name']),
" ".join(old_name),
contributor['email'])
update_mailmap = True
elif contributor['name'] in old_names:
# probably a new email for a previous contributor
other_mail = filter(lambda x: x['name'] == contributor['name'],
old_contributors)[0]['email']
print "new email %s for %s %s ?" % (
contributor['email'],
" ".join(contributor['name']),
other_mail)
update_mailmap = True
if update_mailmap:
print "Please update .mailmap"
elif existing_name is not None:
new_alias.append((contributor, existing_name))
elif existing_email is not None:
new_email.append((contributor, existing_email))
return (
sorted(new_contributors, key=lambda x: x['name'].split()[-1].lower()),
new_alias,
new_email
)
# sort on the last word of the name
new_contributors = sorted(new_contributors,
key=lambda x: x['name'][-1].lower())
# show new contributors to be merged to the list
if new_contributors:
print "inserting:"
for contributor in new_contributors:
print format_contributor(contributor)
def merge_short_term_contributors(contrib_file_lines, new_contributors):
""" Merge new contributors into Short-term Contributions section in
alphabetical order.
# merge with alphabetical (by last part of name) contributor list
i = 0
Returns:
(list) Lines including new contributors for writing to CONTRIBUTORS.rst
"""
short_term_found = False
for line in fileinput.input("CONTRIBUTORS.rst", inplace=1):
for (i, line) in enumerate(contrib_file_lines):
if not short_term_found:
print line,
if "Short-term" in line:
short_term_found = True
else:
if i >= len(new_contributors) or "@" not in line:
print line,
else:
listed_name = line.split()[-2].lower()
contributor = new_contributors[i]
# insert all new contributors that fit here
while listed_name > contributor["name"][-1].lower():
print format_contributor(contributor)
i += 1
if i < len(new_contributors):
contributor = new_contributors[i]
else:
break
print line,
# append remaining contributors
with open("CONTRIBUTORS.rst", "a") as contrib_file:
while i < len(new_contributors):
contrib_file.write(format_contributor(new_contributors[i]) + "\n")
i += 1
if CONTRIB_FILE_CONTRIBUTOR_RE.search(line):
break
short_term_contributor_lines = [l for l in contrib_file_lines[i:] if l] + \
[format_contributor(c) + "\n" for c in new_contributors]
def last_name_sort(contrib_line):
m = CONTRIB_FILE_CONTRIBUTOR_RE.search(contrib_line)
return m.group(1).split()[-1].lower()
return contrib_file_lines[:i] + sorted(short_term_contributor_lines, key=last_name_sort)
def main():
contrib_file = Path("CONTRIBUTORS.rst")
with contrib_file.open() as f:
contrib_file_lines = f.readlines()
old_contributors = get_old_contributors(contrib_file_lines)
contributors = get_contributors()
new_contributors, new_alias, new_email = get_new_contributors(contributors, old_contributors)
for contributor, old_name in new_alias:
print("new alias {0} for {1} {2} ?".format(
contributor['name'], old_name, contributor['email']))
for contributor, old_email in new_email:
print("new email {0} for {1} {2} ?".format(
contributor['email'], contributor['name'], old_email))
if new_alias or new_email:
print("Please update .mailmap")
if new_contributors:
print("inserting:")
print("\n".join([format_contributor(c) for c in new_contributors]))
with contrib_file.open("w") as f:
f.writelines(merge_short_term_contributors(contrib_file_lines, new_contributors))
if __name__ == "__main__":
    main()
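For reference, get_contributors() parses git's tab-separated `shortlog -se` format; a quick sketch of what one parsed line yields (sample line borrowed from the test fixtures further down):

    import re

    line = "   585\tAaron Griffith <aargri@gmail.com>"
    m = re.search(r"(\d+)\t(.+) (<.+>)", line)
    contributor = {"count": int(m.group(1)), "name": m.group(2), "email": m.group(3)}
    # -> {'count': 585, 'name': 'Aaron Griffith', 'email': '<aargri@gmail.com>'}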

contrib/cyrillic_convert.py

@@ -1,92 +1,93 @@
#!/usr/bin/python
#!/usr/bin/env python3
"""Convert gibberish back into Cyrillic"""
import fileinput
import os
import argparse
import sys
usage = """
If you have signs that should be Cyrillic, but are instead gibberish,
this script will convert it back to proper Cyrillic.
usage: python %(script)s <markers.js>
ex. python %(script)s C:\\Inetpub\\www\\map\\markers.js
or %(script)s /srv/http/map/markers.js
""" % {'script': os.path.basename(sys.argv[0])}
if len(sys.argv) < 2:
sys.exit(usage)
gibberish_to_cyrillic = {
r"\u00c0": r"\u0410",
r"\u00c1": r"\u0411",
r"\u00c2": r"\u0412",
r"\u00c3": r"\u0413",
r"\u00c4": r"\u0414",
r"\u00c5": r"\u0415",
r"\u00c6": r"\u0416",
r"\u00c7": r"\u0417",
r"\u00c8": r"\u0418",
r"\u00c9": r"\u0419",
r"\u00ca": r"\u041a",
r"\u00cb": r"\u041b",
r"\u00cc": r"\u041c",
r"\u00cd": r"\u041d",
r"\u00ce": r"\u041e",
r"\u00cf": r"\u041f",
r"\u00d0": r"\u0420",
r"\u00d1": r"\u0421",
r"\u00d2": r"\u0422",
r"\u00d3": r"\u0423",
r"\u00d4": r"\u0424",
r"\u00d5": r"\u0425",
r"\u00d6": r"\u0426",
r"\u00d7": r"\u0427",
r"\u00d8": r"\u0428",
r"\u00d9": r"\u0429",
r"\u00da": r"\u042a",
r"\u00db": r"\u042b",
r"\u00dc": r"\u042c",
r"\u00dd": r"\u042d",
r"\u00de": r"\u042e",
r"\u00df": r"\u042f",
r"\u00e0": r"\u0430",
r"\u00e1": r"\u0431",
r"\u00e2": r"\u0432",
r"\u00e3": r"\u0433",
r"\u00e4": r"\u0434",
r"\u00e5": r"\u0435",
r"\u00e6": r"\u0436",
r"\u00e7": r"\u0437",
r"\u00e8": r"\u0438",
r"\u00e9": r"\u0439",
r"\u00ea": r"\u043a",
r"\u00eb": r"\u043b",
r"\u00ec": r"\u043c",
r"\u00ed": r"\u043d",
r"\u00ee": r"\u043e",
r"\u00ef": r"\u043f",
r"\u00f0": r"\u0440",
r"\u00f1": r"\u0441",
r"\u00f2": r"\u0442",
r"\u00f3": r"\u0443",
r"\u00f4": r"\u0444",
r"\u00f5": r"\u0445",
r"\u00f6": r"\u0446",
r"\u00f7": r"\u0447",
r"\u00f8": r"\u0448",
r"\u00f9": r"\u0449",
r"\u00fa": r"\u044a",
r"\u00fb": r"\u044b",
r"\u00fc": r"\u044c",
r"\u00fd": r"\u044d",
r"\u00fe": r"\u044e",
r"\u00ff": r"\u044f"
'À': 'А',
'Á': 'Б',
'Â': 'В',
'Ã': 'Г',
'Ä': 'Д',
'Å': 'Е',
'Æ': 'Ж',
'Ç': 'З',
'È': 'И',
'É': 'Й',
'Ê': 'К',
'Ë': 'Л',
'Ì': 'М',
'Í': 'Н',
'Î': 'О',
'Ï': 'П',
'Ð': 'Р',
'Ñ': 'С',
'Ò': 'Т',
'Ó': 'У',
'Ô': 'Ф',
'Õ': 'Х',
'Ö': 'Ц',
'×': 'Ч',
'Ø': 'Ш',
'Ù': 'Щ',
'Ú': 'Ъ',
'Û': 'Ы',
'Ü': 'Ь',
'Ý': 'Э',
'Þ': 'Ю',
'ß': 'Я',
'à': 'а',
'á': 'б',
'â': 'в',
'ã': 'г',
'ä': 'д',
'å': 'е',
'æ': 'ж',
'ç': 'з',
'è': 'и',
'é': 'й',
'ê': 'к',
'ë': 'л',
'ì': 'м',
'í': 'н',
'î': 'о',
'ï': 'п',
'ð': 'р',
'ñ': 'с',
'ò': 'т',
'ó': 'у',
'ô': 'ф',
'õ': 'х',
'ö': 'ц',
'÷': 'ч',
'ø': 'ш',
'ù': 'щ',
'ú': 'ъ',
'û': 'ы',
'ü': 'ь',
'ý': 'э',
'þ': 'ю',
'ÿ': 'я'
}
trans_table = {ord(k): v for k, v in gibberish_to_cyrillic.items()}
for line in fileinput.FileInput(inplace=1):
for i, j in gibberish_to_cyrillic.iteritems():
line = line.replace(i, j)
sys.stdout.write(line)
def convert(s):
return s.translate(trans_table)
if __name__ == '__main__':
description = """
If you have signs that should be Cyrillic, but are instead gibberish,
this script will convert it back to proper Cyrillic.
"""
parser = argparse.ArgumentParser(description=description)
parser.add_argument('file', metavar='markers.js')
args = parser.parse_args()
    for line in fileinput.input(files=args.file, inplace=True):
        print(convert(line), end='')
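A note on the rewrite above: the per-line replace() loop is gone in favour of a single str.translate() table keyed by code point. A two-entry sketch of the mechanism:

    gibberish_to_cyrillic = {'À': 'А', 'Á': 'Б'}  # abbreviated mapping
    trans_table = {ord(k): v for k, v in gibberish_to_cyrillic.items()}
    assert 'ÀÁ'.translate(trans_table) == 'АБ'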

contrib/gallery.py

@@ -1,40 +1,49 @@
#!/usr/bin/env python3
"""
Outputs a huge image with all currently-supported block textures.
"""
from overviewer_core import textures
import argparse
from PIL import Image
import sys
import Image
import os
if len(sys.argv) != 2:
print "usage: %s [output.png]" % (sys.argv[0],)
sys.exit(1)
# incantation to be able to import overviewer_core
if not hasattr(sys, "frozen"):
sys.path.insert(0, os.path.abspath(os.path.join(os.path.split(__file__)[0], '..')))
t = textures.Textures()
t.generate()
blocks = {}
def main(outfile):
from overviewer_core import textures
t = textures.Textures()
t.generate()
for blockid in xrange(textures.max_blockid):
for data in xrange(textures.max_data):
tex = t.blockmap[blockid * textures.max_data + data]
if tex:
if not blockid in blocks:
blocks[blockid] = {}
blocks[blockid][data] = tex
blocks = {}
columns = max(map(len, blocks.values()))
rows = len(blocks)
texsize = t.texture_size
for blockid in range(textures.max_blockid):
for data in range(textures.max_data):
tex = t.blockmap[blockid * textures.max_data + data]
if tex:
if blockid not in blocks:
blocks[blockid] = {}
blocks[blockid][data] = tex
gallery = Image.new("RGBA", (columns * texsize, rows * texsize), t.bgcolor)
columns = max(len(v) for v in blocks.values())
rows = len(blocks)
texsize = t.texture_size
row = 0
for blockid, textures in blocks.iteritems():
column = 0
for data, tex in textures.iteritems():
gallery.paste(tex[0], (column * texsize, row * texsize))
column += 1
row += 1
gallery = Image.new("RGBA", (columns * texsize, rows * texsize), t.bgcolor)
gallery.save(sys.argv[1])
for row, (blockid, textures) in enumerate(blocks.items()):
for column, (data, tex) in enumerate(textures.items()):
gallery.paste(tex[0], (column * texsize, row * texsize))
gallery.save(outfile)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('file', metavar='output.png')
args = parser.parse_args()
main(args.file)
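The enumerate()-based paste loop is the heart of the gallery layout; a tiny self-contained sketch of the same grid-paste pattern (a hypothetical 2x2 sheet, not Overviewer textures):

    from PIL import Image

    texsize = 16
    tiles = [Image.new("RGBA", (texsize, texsize), c)
             for c in ("red", "green", "blue", "white")]
    sheet = Image.new("RGBA", (2 * texsize, 2 * texsize))
    for i, tile in enumerate(tiles):
        sheet.paste(tile, ((i % 2) * texsize, (i // 2) * texsize))
    sheet.save("sheet.png")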

contrib/playerInspect.py

@@ -1,20 +1,21 @@
#!/usr/bin/env python3
"""
Very basic player.dat inspection script
"""
from __future__ import print_function
import os
import sys
import argparse
from pathlib import Path
# incantation to be able to import overviewer_core
if not hasattr(sys, "frozen"):
sys.path.insert(0, os.path.abspath(os.path.join(os.path.split(__file__)[0], '..')))
from overviewer_core.nbt import load
from overviewer_core import items
def print_player(data, sub_entry=False):
indent = ""
if sub_entry:
@@ -36,26 +37,58 @@ def print_player(data, sub_entry=False):
print(" %-3d %s" % (item['Count'], items.id2item(item['id'])))
if __name__ == '__main__':
if len(sys.argv) < 2 or len(sys.argv) > 3:
print("Usage: {} <Player .dat or directory> [selected player]"
.format(sys.argv[0]), file=sys.stderr)
sys.exit(1)
print("Inspecting %s" % sys.argv[1])
def find_all_player_files(dir_path):
for player_file in dir_path.iterdir():
player = player_file.stem
yield player_file, player
if os.path.isdir(sys.argv[1]):
directory = sys.argv[1]
if len(sys.argv) > 2:
selected_player = sys.argv[2]
else:
selected_player = None
for player_file in os.listdir(directory):
player = player_file.split(".")[0]
if selected_player in [None, player]:
print("")
print(player)
data = load(os.path.join(directory, player_file))[1]
print_player(data, sub_entry=(selected_player is None))
else:
data = load(sys.argv[1])[1]
print_player(data)
def find_player_file(dir_path, selected_player):
for player_file, player in find_all_player_files(dir_path):
if selected_player == player:
return player_file, player
raise FileNotFoundError()
def load_and_output_player(player_file_path, player, sub_entry=False):
with player_file_path.open('rb') as f:
player_data = load(f)[1]
print("")
print(player)
print_player(player_data, sub_entry=sub_entry)
def dir_or_file(path):
p = Path(path)
if not p.is_file() and not p.is_dir():
raise argparse.ArgumentTypeError("Not a valid file or directory path")
return p
def main(path, selected_player=None):
    print("Inspecting %s" % path)
    if not path.is_dir():
        # single .dat file: derive the player name from the file stem,
        # matching find_all_player_files above
        load_and_output_player(path, path.stem)
        return
    if selected_player is None:
        for player_file, player in find_all_player_files(path):
            load_and_output_player(player_file, player)
        return
    try:
        player_file, player = find_player_file(path, selected_player)
        load_and_output_player(player_file, player, sub_entry=True)
    except FileNotFoundError:
        print("No %s.dat in %s" % (selected_player, path))
        sys.exit(1)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('path', metavar='<Player.dat or directory>', type=dir_or_file)
parser.add_argument('selected_player', nargs='?', default=None)
args = parser.parse_args()
main(args.path, selected_player=args.selected_player)
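dir_or_file above follows argparse's custom-type protocol: the callable either returns the converted value or raises ArgumentTypeError, which argparse reports as a usage error. A minimal sketch with a hypothetical helper:

    import argparse
    from pathlib import Path

    def existing_path(value):
        p = Path(value)
        if not p.exists():
            raise argparse.ArgumentTypeError("Not a valid file or directory path")
        return p

    parser = argparse.ArgumentParser()
    parser.add_argument("path", type=existing_path)
    args = parser.parse_args(["."])  # "." always exists, so args.path == Path(".")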

contrib/regionTrimmer.py

@@ -1,40 +1,51 @@
#!/usr/bin/env python
#!/usr/bin/env python3
"""Deletes outlying and unconnected regions"""
import argparse
import logging
import os
import sys
import glob
from pathlib import Path
import networkx
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def get_region_file_from_node(regionset_path, node):
return os.path.join(regionset_path, 'r.%d.%d.mca' % node)
return regionset_path / ('r.%d.%d.mca' % node)
def get_nodes(regionset_path):
return [tuple(map(int, r.split('.')[1:3])) \
for r in glob.glob(os.path.join(regionset_path, 'r.*.*.mca'))]
return [
tuple(int(x) for x in r.stem.split('.')[1:3])
for r in regionset_path.glob('r.*.*.mca')
]
def generate_edges(graph):
offsets = (-1, 1)
nodes = graph.nodes()
for node in nodes:
for offset in offsets:
graph.add_edges_from((node, offset_node) for offset_node in \
[(node[0] + offset, node[1]), (node[0], node[1] + offset), \
(node[0] + offset, node[1] + offset)] \
if offset_node in nodes)
graph.add_edges_from(
(node, offset_node)
for offset_node in [
(node[0] + offset, node[1]),
(node[0], node[1] + offset),
(node[0] + offset, node[1] + offset),
]
if offset_node in nodes
)
return graph
def generate_subgraphs(nodes):
graph = networkx.Graph()
graph.add_nodes_from(nodes)
generate_edges(graph)
return graph, networkx.connected_component_subgraphs(graph)
return graph, [graph.subgraph(c) for c in networkx.connected_components(graph)]
def get_graph_bounds(graph):
nodes = graph.nodes()
@@ -45,114 +56,130 @@ def get_graph_bounds(graph):
min(n[1] for n in nodes),
)
def get_graph_center_by_bounds(bounds):
dx = bounds[0] - bounds[1]
dy = bounds[2] - bounds[3]
return (dx / 2 + bounds[1], dy / 2 + bounds[3])
return (dx // 2 + bounds[1], dy // 2 + bounds[3])
def main(*args, **options):
if len(args) < 1:
logger.error('Missing region directory argument')
return
for path in args:
logger.info('Using regionset path: %s', path)
def trim_regions(graph, regions_path, dry_run=True, filter_func=lambda n: True):
regions = [
(n, get_region_file_from_node(regions_path, n))
for n in graph.nodes()
if filter_func(n)
]
logger.info("Trimming regions: %s", ", ".join(x[1] for x in regions))
for n, region_file in regions:
graph.remove_node(n)
if dry_run is False:
unlink_file(region_file)
def is_outside_main(center, main_section_bounds):
    # bounds are (max_x, min_x, max_z, min_z); outside means NOT within both ranges
    return not (main_section_bounds[1] <= center[0] <= main_section_bounds[0] and
                main_section_bounds[3] <= center[1] <= main_section_bounds[2])
def is_outside_bounds(node, trim_center, trim_bounds):
return node[0] >= trim_center[0] + trim_bounds[0] or \
node[0] <= trim_center[0] - trim_bounds[0] or \
node[1] >= trim_center[1] + trim_bounds[1] or \
node[1] <= trim_center[1] - trim_bounds[1]
def unlink_file(path):
try:
path.unlink()
except OSError as err:
logger.warning("Unable to delete file: %s", path)
logger.warning("Error recieved was: %s", err)
def main(args):
for path in args.paths:
logger.info("Using regionset path: %s", path)
nodes = get_nodes(path)
if not len(nodes):
logger.error('Found no nodes, are you sure there are .mca files in %s ?',
path)
logger.error("Found no nodes, are you sure there are .mca files in %s ?",
path)
return
logger.info('Found %d nodes', len(nodes))
logger.info('Generating graphing nodes...')
logger.info("Found %d nodes", len(nodes))
logger.info("Generating graphing nodes...")
graph, subgraphs = generate_subgraphs(nodes)
assert len(graph.nodes()) == sum(len(sg.nodes()) for sg in subgraphs)
if len(subgraphs) == 1:
logger.warn('All regions are contiguous, the needful is done!')
logger.warning("All regions are contiguous, the needful is done!")
return
logger.info('Found %d discrete region sections', len(subgraphs))
logger.info("Found %d discrete region sections", len(subgraphs))
subgraphs = sorted(subgraphs, key=lambda sg: len(sg), reverse=True)
for i, sg in enumerate(subgraphs):
logger.info('Region section #%02d: %04d nodes', i+1, len(sg.nodes()))
logger.info("Region section #%02d: %04d nodes", i + 1, len(sg.nodes()))
bounds = get_graph_bounds(sg)
logger.info('Bounds: %d <-> %d x %d <-> %d', *get_graph_bounds(sg))
logger.info("Bounds: %d <-> %d x %d <-> %d", *get_graph_bounds(sg))
center = get_graph_center_by_bounds(bounds)
logger.info('Center: %d x %d', *center)
logger.info("Center: %d x %d", *center)
main_section = subgraphs[0]
main_section_bounds = get_graph_bounds(main_section)
main_section_center = get_graph_center_by_bounds(main_section_bounds)
logger.info('Using %d node graph as main section,', len(main_section.nodes()))
logger.info("Using %d node graph as main section,", len(main_section.nodes()))
satellite_sections = subgraphs[1:]
for ss in satellite_sections:
bounds = get_graph_bounds(ss)
center = get_graph_center_by_bounds(bounds)
logger.info('Checking satellite section with %d nodes, %d <-> %d x %d <-> %d bounds and %d x %d center',
len(ss.nodes()), *(bounds + center))
if options['trim_disconnected']:
logger.info('Trimming regions: %s', ', '.join(
get_region_file_from_node(path, n) for n in ss.nodes()))
for n, region_file in ((n, get_region_file_from_node(path, n)) \
for n in ss.nodes()):
ss.remove_node(n)
if not options['dry_run']:
unlink_file(region_file)
if options['trim_outside_main']:
if center[0] <= main_section_bounds[0] and center[0] >= main_section_bounds[1] and \
center[1] <= main_section_bounds[2] and center[1] >= main_section_bounds[3]:
logger.info('Section falls inside main section bounds, ignoring')
logger.info(("Checking satellite section with %d nodes, "
"%d <-> %d x %d <-> %d bounds and %d x %d center"),
len(ss.nodes()), *(bounds + center))
if args.trim_disconnected:
trim_regions(ss, path, dry_run=args.dry_run)
if args.trim_outside_main:
if is_outside_main(center, main_section_bounds):
logger.info("Section is outside main section bounds")
trim_regions(ss, path, dry_run=args.dry_run)
else:
logger.info('Section is outside main section bounds')
logger.info('Trimming regions: %s', ', '.join(
get_region_file_from_node(path, n) for n in ss.nodes()))
for n, region_file in ((n, get_region_file_from_node(path, n)) \
for n in ss.nodes()):
ss.remove_node(n)
if not options['dry_run']:
unlink_file(region_file)
if options['trim_outside_bounds']:
x = map(int, options['trim_outside_bounds'].split(','))
if len(x) == 4:
trim_center = x[:2]
trim_bounds = x[2:]
elif len(x) == 2:
trim_center = main_section_center
trim_bounds = x
else:
logger.error('Invalid center/bound value: %s',
options['trim_outside_bounds'])
continue
for node in ss.nodes():
if node[0] >= trim_center[0] + trim_bounds[0] or \
node[0] <= trim_center[0] - trim_bounds[0] or \
node[1] >= trim_center[1] + trim_bounds[1] or \
node[1] <= trim_center[1] - trim_bounds[1]:
region_file = get_region_file_from_node(path, node)
logger.info('Region falls outside specified bounds, trimming: %s',
region_file)
ss.remove_node(node)
if not options['dry_run']:
unlink_file(region_file)
logger.info("Section falls inside main section bounds, ignoring")
def unlink_file(path):
try:
os.unlink(path)
except OSError as err:
logger.warn('Unable to delete file: %s', path)
logger.warn('Error recieved was: %s', err)
if args.trim_outside_bounds:
logger.info("Checking regions outside specified bounds")
trim_center = args.trim_outside_bounds.get("center", main_section_center)
trim_bounds = args.trim_outside_bounds["bounds"]
trim_regions(ss, path, dry_run=args.dry_run,
filter_func=lambda n: is_outside_bounds(n, trim_center, trim_bounds))
if __name__ == '__main__':
import optparse
def dir_path(path):
p = Path(path)
if not p.is_dir():
raise argparse.ArgumentTypeError("Not a valid directory path")
return p
def center_bound(value):
x = [int(v) for v in value.split(",")]
if len(x) == 4:
return {"center": x[:2], "bounds": x[2:]}
elif len(x) == 2:
return {"bounds": x}
else:
raise argparse.ArgumentTypeError("Invalid center/bound value")
if __name__ == "__main__":
logging.basicConfig()
parser = optparse.OptionParser(
usage='Usage: %prog [options] <path/to/region/directory>')
parser.add_option('-D', '--trim-disconnected', action='store_true', default=False,
help='Trim all disconnected regions')
parser.add_option('-M', '--trim-outside-main', action='store_true', default=False,
help='Trim disconnected regions outside main section bounds')
parser.add_option('-B', '--trim-outside-bounds', default=False,
metavar='[center_X,center_Y,]bound_X,bound_Y',
help='Trim outside given bounds (given as [center_X,center_Y,]bound_X,bound_Y)')
parser.add_option('-n', '--dry-run', action='store_true', default=False,
help='Don\'t actually delete anything')
opts, args = parser.parse_args()
main(*args, **vars(opts))
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("paths", metavar="<path/to/region/directory>", nargs="+", type=dir_path)
parser.add_argument("-D", "--trim-disconnected", action="store_true",
default=False, help="Trim all disconnected regions")
parser.add_argument("-M", "--trim-outside-main", action="store_true",
default=False, help="Trim disconnected regions outside main section bounds")
parser.add_argument("-B", "--trim-outside-bounds",
metavar="[center_X,center_Y,]bound_X,bound_Y", type=center_bound,
help=("Trim outside given bounds "
"(given as [center_X,center_Y,]bound_X,bound_Y)"))
parser.add_argument("-n", "--dry-run", action="store_true", default=False,
help="Don't actually delete anything")
args = parser.parse_args()
main(args)
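The connected-components change is forced by NetworkX 2.x, where connected_component_subgraphs() was deprecated and then removed; the list-comprehension form is the documented replacement. A small sketch:

    import networkx

    graph = networkx.Graph()
    graph.add_edges_from([((0, 0), (0, 1))])  # one contiguous pair of regions
    graph.add_node((5, 5))                    # plus a disconnected satellite
    subgraphs = [graph.subgraph(c) for c in networkx.connected_components(graph)]
    assert sorted(len(sg) for sg in subgraphs) == [1, 2]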

contrib/rerenderBlocks.py

@@ -1,69 +0,0 @@
#!/usr/bin/python
'''
Generate a region list to rerender certain chunks
This is used to force the regeneration of any chunks that contain a certain
blockID. The output is a chunklist file that is suitable to use with the
--chunklist option to overviewer.py.
Example:
python contrib/rerenderBlocks.py --ids=46,79,91 --world=world/> regionlist.txt
python overviewer.py --regionlist=regionlist.txt world/ output_dir/
This will rerender any chunks that contain either TNT (46), Ice (79), or
a Jack-O-Lantern (91)
'''
from optparse import OptionParser
import sys,os
import re
# incantation to be able to import overviewer_core
if not hasattr(sys, "frozen"):
sys.path.insert(0, os.path.abspath(os.path.join(os.path.split(__file__)[0], '..')))
from overviewer_core import nbt
from overviewer_core import world
from overviewer_core.chunk import get_blockarray
parser = OptionParser()
parser.add_option("--ids", dest="ids", type="string")
parser.add_option("--world", dest="world", type="string")
options, args = parser.parse_args()
if not options.world or not options.ids:
parser.print_help()
sys.exit(1)
if not os.path.exists(options.world):
raise Exception("%s does not exist" % options.world)
ids = map(lambda x: int(x),options.ids.split(","))
sys.stderr.write("Searching for these blocks: %r...\n" % ids)
matcher = re.compile(r"^r\..*\.mcr$")
for dirpath, dirnames, filenames in os.walk(options.world):
for f in filenames:
if matcher.match(f):
full = os.path.join(dirpath, f)
r = nbt.load_region(full, 'lower-left')
chunks = r.get_chunks()
found = False
for x,y in chunks:
chunk = r.load_chunk(x,y).read_all()
blocks = get_blockarray(chunk[1]['Level'])
for i in ids:
if chr(i) in blocks:
print full
found = True
break
if found:
break

contrib/testRender.py

@@ -1,43 +1,47 @@
#!/usr/bin/python
#!/usr/bin/env python3
"Test Render Script"
import os, shutil, tempfile, time, sys, math, re
from subprocess import Popen, PIPE, STDOUT, CalledProcessError
from optparse import OptionParser
import argparse
import math
import os
import re
import shutil
import sys
import tempfile
import time
from shlex import split
from subprocess import PIPE, STDOUT, CalledProcessError, run
overviewer_scripts = ['./overviewer.py', './gmap.py']
def check_call(*args, **kwargs):
quiet = False
if "quiet" in kwargs.keys():
quiet = kwargs["quiet"]
del kwargs["quiet"]
if quiet:
kwargs['stdout'] = PIPE
kwargs['stderr'] = STDOUT
p = Popen(*args, **kwargs)
output = ""
if quiet:
while p.poll() == None:
output += p.communicate()[0]
returncode = p.wait()
if returncode:
if quiet:
print output
raise CalledProcessError(returncode, args)
return returncode
def check_output(*args, **kwargs):
kwargs['stdout'] = PIPE
# will hang for HUGE output... you were warned
p = Popen(*args, **kwargs)
returncode = p.wait()
if returncode:
raise CalledProcessError(returncode, args)
return p.communicate()[0]
def check_call(args, verbose=False):
try:
return run(
args,
check=True,
stdout=None if verbose else PIPE,
stderr=None if verbose else STDOUT,
universal_newlines=True,
)
except CalledProcessError as e:
if verbose:
print(e.output)
raise e
def clean_render(overviewerargs, quiet):
def check_output(args):
p = run(
args,
check=True,
stdout=PIPE,
universal_newlines=True
)
return p.stdout
def clean_render(overviewerargs, verbose=False):
tempdir = tempfile.mkdtemp('mc-overviewer-test')
overviewer_script = None
for script in overviewer_scripts:
@@ -47,115 +51,124 @@ def clean_render(overviewerargs, quiet):
if overviewer_script is None:
sys.stderr.write("could not find main overviewer script\n")
sys.exit(1)
try:
# check_call raises CalledProcessError when overviewer.py exits badly
check_call([sys.executable, 'setup.py', 'clean', 'build'], quiet=quiet)
check_call([sys.executable] + split("setup.py clean build"), verbose=verbose)
try:
check_call([sys.executable, overviewer_script, '-d'] + overviewerargs, quiet=quiet)
check_call([sys.executable, overviewer_script, '-d'] + overviewerargs, verbose=verbose)
except CalledProcessError:
pass
starttime = time.time()
check_call([sys.executable, overviewer_script,] + overviewerargs + [tempdir,], quiet=quiet)
check_call([sys.executable, overviewer_script] +
overviewerargs + [tempdir, ], verbose=verbose)
endtime = time.time()
return endtime - starttime
finally:
shutil.rmtree(tempdir, True)
def get_stats(timelist):
stats = {}
stats['count'] = len(timelist)
stats['minimum'] = min(timelist)
stats['maximum'] = max(timelist)
stats['average'] = sum(timelist) / float(len(timelist))
meandiff = map(lambda x: (x - stats['average'])**2, timelist)
stats['standard deviation'] = math.sqrt(sum(meandiff) / float(len(meandiff)))
return stats
commitre = re.compile('^commit ([a-z0-9]{40})$', re.MULTILINE)
branchre = re.compile('^\\* (.+)$', re.MULTILINE)
def get_stats(timelist):
average = sum(timelist) / float(len(timelist))
meandiff = [(x - average) ** 2 for x in timelist]
sd = math.sqrt(sum(meandiff) / len(meandiff))
return {
"count": len(timelist),
"minimum": min(timelist),
"maximum": max(timelist),
"average": average,
"standard deviation": sd
}
def get_current_branch():
gittext = check_output(split('git rev-parse --abbrev-ref HEAD'))
return gittext.strip() if gittext != "HEAD" else None
def get_current_commit():
gittext = check_output(['git', 'branch'])
match = branchre.search(gittext)
if match and not ("no branch" in match.group(1)):
return match.group(1)
gittext = check_output(['git', 'show', 'HEAD'])
match = commitre.match(gittext)
if match == None:
return None
return match.group(1)
gittext = check_output(split('git rev-parse HEAD'))
return gittext.strip() if gittext else None
def get_current_ref():
branch = get_current_branch()
if branch:
return branch
commit = get_current_commit()
if commit:
return commit
def get_commits(gitrange):
gittext = check_output(['git', 'log', '--raw', '--reverse', gitrange])
for match in commitre.finditer(gittext):
yield match.group(1)
gittext = check_output(split('git rev-list --reverse') + [gitrange, ])
return (c for c in gittext.split("\n"))
def set_commit(commit):
check_call(['git', 'checkout', commit], quiet=True)
check_call(split('git checkout') + [commit, ])
parser = OptionParser(usage="usage: %prog [options] -- [overviewer options/world]")
parser.add_option("-n", "--number", metavar="N",
action="store", type="int", dest="number", default=3,
help="number of renders per commit [default: 3]")
parser.add_option("-c", "--commits", metavar="RANGE",
action="append", type="string", dest="commits", default=[],
help="the commit (or range of commits) to test [default: current]")
parser.add_option("-v", "--verbose",
action="store_false", dest="quiet", default=True,
help="don't suppress overviewer output")
parser.add_option("-k", "--keep-going",
action="store_false", dest="fatal_errors", default=True,
help="don't stop testing when Overviewer croaks")
parser.add_option("-l", "--log", dest="log", default="", metavar="FILE",
help="log all test results to a file")
(options, args) = parser.parse_args()
def main(args):
commits = []
for commit in args.commits:
if '..' in commit:
commits = get_commits(commit)
else:
commits.append(commit)
if not commits:
commits = [get_current_ref(), ]
if len(args) == 0:
parser.print_help()
sys.exit(0)
log = None
if args.log:
log = args.log
commits = []
for commit in options.commits:
if '..' in commit:
commits = get_commits(commit)
else:
commits.append(commit)
if not commits:
commits = [get_current_commit(),]
reset_commit = get_current_ref()
try:
for commit in commits:
print("testing commit", commit)
set_commit(commit)
timelist = []
print(" -- "),
try:
for i in range(args.number):
sys.stdout.write(str(i + 1) + " ")
sys.stdout.flush()
timelist.append(clean_render(args.overviewer_args, verbose=args.verbose))
print("... done")
stats = get_stats(timelist)
print(stats)
if log:
log.write("%s %s\n" % (commit, repr(stats)))
except CalledProcessError as e:
if args.fatal_errors:
print(e)
print("Overviewer croaked, exiting...")
print("(to avoid this, use --keep-going)")
sys.exit(1)
finally:
set_commit(reset_commit)
if log:
log.close()
log = None
if options.log != "":
log = open(options.log, "w")
reset_commit = get_current_commit()
try:
for commit in commits:
print "testing commit", commit
set_commit(commit)
timelist = []
print " -- ",
try:
for i in range(options.number):
sys.stdout.write(str(i+1)+" ")
sys.stdout.flush()
timelist.append(clean_render(args, options.quiet))
print "... done"
stats = get_stats(timelist)
print stats
if log:
log.write("%s %s\n" % (commit, repr(stats)))
except CalledProcessError, e:
if options.fatal_errors:
print
print "Overviewer croaked, exiting..."
print "(to avoid this, use --keep-going)"
sys.exit(1)
finally:
set_commit(reset_commit)
if log:
log.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("overviewer_args", metavar="[overviewer options/world]", nargs="+")
parser.add_argument("-n", "--option", metavar="N", type=int, action="store",
dest="number", default=3, help="number of renders per commit [default: 3]")
parser.add_argument("-c", "--commits", metavar="RANGE",
action="append", type=str, dest="commits", default=[],
help="the commit (or range of commits) to test [default: current]")
parser.add_argument("-v", "--verbose", action="store_true", dest="verbose", default=False,
help="don't suppress overviewer output")
parser.add_argument("-k", "--keep-going",
action="store_false", dest="fatal_errors", default=True,
help="don't stop testing when Overviewer croaks")
parser.add_argument("-l", "--log", dest="log", type=argparse.FileType('w'), metavar="FILE",
help="log all test results to a file")
args = parser.parse_args()
main(args)
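get_stats() computes a population standard deviation by hand; as a sanity check it agrees with statistics.pstdev from the standard library (an equivalence check only, not part of the script):

    import math
    import statistics

    timelist = [10.0, 12.0, 11.0]
    average = sum(timelist) / len(timelist)
    sd = math.sqrt(sum((x - average) ** 2 for x in timelist) / len(timelist))
    assert math.isclose(sd, statistics.pstdev(timelist))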

contrib/validateRegionFile.py

@@ -1,114 +0,0 @@
#!/usr/bin/env python
'''
Validate a region file
TODO description here'''
import os
import sys
# incantation to be able to import overviewer_core
if not hasattr(sys, "frozen"):
sys.path.insert(0, os.path.abspath(os.path.join(os.path.split(__file__)[0], '..')))
from overviewer_core import nbt
def check_region(region_filename):
chunk_errors = []
if not os.path.exists(region_filename):
raise Exception('Region file not found: %s' % region_filename)
try:
region = nbt.load_region(region_filename, 'lower-left')
except IOError, e:
raise Exception('Error loading region (%s): %s' % (region_filename, e))
try:
region.get_chunk_info(False)
chunks = region.get_chunks()
except IOError, e:
raise Exception('Error reading region header (%s): %s' % (region_filename, e))
except Exception, e:
raise Exception('Error reading region (%s): %s' % (region_filename, e))
for x,y in chunks:
try:
check_chunk(region, x, y)
except Exception, e:
chunk_errors.append(e)
return (chunk_errors, len(chunks))
def check_chunk(region, x, y):
try:
data = region.load_chunk(x ,y)
except Exception, e:
raise Exception('Error reading chunk (%i, %i): %s' % (x, y, e))
if data is None:
raise Exception('Chunk (%i, %i) is unexpectedly empty' % (x, y))
else:
try:
processed_data = data.read_all()
except Exception, e:
raise Exception('Error reading chunk (%i, %i) data: %s' % (x, y, e))
if processed_data == []:
raise Exception('Chunk (%i, %i) is an unexpectedly empty set' % (x, y))
if __name__ == '__main__':
try:
from optparse import OptionParser
parser = OptionParser(usage='python contrib/%prog [OPTIONS] <path/to/regions|path/to/regions/*.mcr|regionfile1.mcr regionfile2.mcr ...>',
description='This script will valide a minecraft region file for errors.')
parser.add_option('-v', dest='verbose', action='store_true', help='Print additional information.')
opts, args = parser.parse_args()
region_files = []
for path in args:
if os.path.isdir(path):
for dirpath, dirnames, filenames in os.walk(path, True):
for filename in filenames:
if filename.startswith('r.') and filename.endswith('.mcr'):
if filename not in region_files:
region_files.append(os.path.join(dirpath, filename))
elif opts.verbose:
print('Ignoring non-region file: %s' % os.path.join(dirpath, filename))
elif os.path.isfile(path):
dirpath,filename = os.path.split(path)
if filename.startswith('r.') and filename.endswith('.mcr'):
if path not in region_files:
region_files.append(path)
else:
print('Ignoring non-region file: %s' % path)
else:
if opts.verbose:
print('Ignoring arg: %s' % path)
if len(region_files) < 1:
print 'You must list at least one region file.'
parser.print_help()
sys.exit(1)
else:
overall_chunk_total = 0
bad_chunk_total = 0
bad_region_total = 0
for region_file in region_files:
try:
(chunk_errors, region_chunks) = check_region(region_file)
bad_chunk_total += len(chunk_errors)
overall_chunk_total += region_chunks
except Exception, e:
bad_region_total += 1
print('FAILED(%s): %s' % (region_file, e))
else:
if len(chunk_errors) is not 0:
print('WARNING(%s) Chunks: %i/%' % (region_file, region_chunks - len(chunk_errors), region_chunks))
if opts.verbose:
for error in chunk_errors:
print(error)
elif opts.verbose:
print ('PASSED(%s) Chunks: %i/%i' % (region_file, region_chunks - len(chunk_errors), region_chunks))
if opts.verbose:
print 'REGIONS: %i/%i' % (len(region_files) - bad_region_total, len(region_files))
print 'CHUNKS: %i/%i' % (overall_chunk_total - bad_chunk_total, overall_chunk_total)
except KeyboardInterrupt:
sys.exit(1)
except Exception, e:
print('ERROR: %s' % e)

contrib/contribManager.py

@@ -10,9 +10,7 @@ import sys
scripts = { # keys are names, values are scripts
"convertCyrillic": "cyrillic_convert.py",
"playerInspect": "playerInspect.py",
"rerenderBlocks": "rerenderBlocks.py",
"testRender": "testRender.py",
"validate": "validateRegionFile.py",
"pngit": "png-it.py",
"gallery": "gallery.py",
"regionTrimmer": "regionTrimmer.py",
@@ -21,9 +19,9 @@ scripts = { # keys are names, values are scripts
# you can symlink or hardlink contribManager.py to another name to have it
# automatically find the right script to run. For example:
# > ln -s contribManager.py validate.exe
# > chmod +x validate.exe
# > ./validate.exe -h
# > ln -s contribManager.py pngit.exe
# > chmod +x pngit.exe
# > ./pngit.exe -h
# figure out what script to execute

test/test_all.py

@@ -2,7 +2,10 @@
import unittest
# For convenience
import sys,os,logging
import sys
import os
import logging
sys.path.insert(0, os.getcwd())
sys.path.insert(0, os.path.join(os.getcwd(), os.pardir))
@@ -12,15 +15,23 @@ from test_rendertileset import RendertileSetTest
from test_settings import SettingsTest
from test_tileset import TilesetTest
from test_cache import TestLRU
from test_contributors import TestContributors
from test_cyrillic_convert import TestCyrillicConvert
from test_playerInspect import TestPlayerInspect
from test_regionTrimmer import TestRegionTrimmer
from test_testRender import TestTestRender
# DISABLE THIS BLOCK TO GET LOG OUTPUT FROM TILESET FOR DEBUGGING
if 0:
root = logging.getLogger()
class NullHandler(logging.Handler):
def handle(self, record):
pass
def emit(self, record):
pass
def createLock(self):
self.lock = None
root.addHandler(NullHandler())

test/test_contributors.py Normal file

@@ -0,0 +1,160 @@
import unittest
from io import StringIO, BytesIO
from textwrap import dedent
from unittest.mock import patch
import contrib.contributors as contrib
class TestContributors(unittest.TestCase):
def setUp(self):
self.contrib_file_lines = dedent("""\
============
Contributors
============
This file contains a list of every person who has contributed code to
Overviewer.
---------------
Original Author
---------------
* Andrew Brown <brownan@gmail.com>
-------------------------
Long-term Contributions
-------------------------
These contributors have made many changes, over a fairly long time span, or
for many different parts of the code.
* Alejandro Aguilera <fenixin@lavabit.com>
------------------------
Short-term Contributions
------------------------
These contributors have made specific changes for a particular bug fix or
feature.
* 3decibels <3db@3decibels.net>""").split("\n")
def test_format_contributor_single_name(self):
contributor = {"name": "John", "email": "<john@gmail.com>"}
self.assertEqual(
contrib.format_contributor(contributor),
" * John <john@gmail.com>"
)
def test_format_contributor_multiple_names(self):
contributor = {"name": "John Smith", "email": "<john@gmail.com>"}
self.assertEqual(
contrib.format_contributor(contributor),
" * John Smith <john@gmail.com>"
)
def test_get_old_contributors(self):
expected = [{"name": "Andrew Brown", "email": "<brownan@gmail.com>"},
{"name": "Alejandro Aguilera", "email": "<fenixin@lavabit.com>"},
{"name": "3decibels", "email": "<3db@3decibels.net>"}]
self.assertListEqual(contrib.get_old_contributors(self.contrib_file_lines), expected)
@patch('subprocess.run')
def test_get_contributors(self, mock_run):
mock_run.return_value.stdout = dedent("""\
1 3decibels <3db@3decibels.net>
585 Aaron Griffith <aargri@gmail.com>
1 Aaron1011 <aa1ronham@gmail.com>
""").encode()
expected = [{"count": 1, "name": "3decibels", "email": "<3db@3decibels.net>"},
{"count": 585, "name": "Aaron Griffith", "email": "<aargri@gmail.com>"},
{"count": 1, "name": "Aaron1011", "email": "<aa1ronham@gmail.com>"}]
self.assertListEqual(contrib.get_contributors(), expected)
def test_get_new_contributors_new_contributors_alphabetical_order(self):
contributors = [{"count": 1, "name": "3decibels", "email": "<3db@3decibels.net>"},
{"count": 585, "name": "Aaron Griffith", "email": "<aargri@gmail.com>"},
{"count": 1, "name": "Aaron1011", "email": "<aa1ronham@gmail.com>"}]
old_contributors = [{"name": "Andrew Brown", "email": "<brownan@gmail.com>"},
{"name": "Alejandro Aguilera", "email": "<fenixin@lavabit.com>"},
{"name": "3decibels", "email": "<3db@3decibels.net>"}]
new_contributors, new_alias, new_email = contrib.get_new_contributors(
contributors, old_contributors)
self.assertListEqual(new_contributors, [{"count": 1, "name": "Aaron1011", "email": "<aa1ronham@gmail.com>"}, {
"count": 585, "name": "Aaron Griffith", "email": "<aargri@gmail.com>"}])
def test_get_new_contributors_new_alias(self):
contributors = [{"count": 1, "name": "new_name", "email": "<3db@3decibels.net>"},
{"count": 585, "name": "Aaron Griffith", "email": "<aargri@gmail.com>"},
{"count": 1, "name": "Aaron1011", "email": "<aa1ronham@gmail.com>"}]
old_contributors = [{"name": "Andrew Brown", "email": "<brownan@gmail.com>"},
{"name": "Alejandro Aguilera", "email": "<fenixin@lavabit.com>"},
{"name": "3decibels", "email": "<3db@3decibels.net>"}]
new_contributors, new_alias, new_email = contrib.get_new_contributors(
contributors, old_contributors)
self.assertListEqual(
new_alias, [({"count": 1, "name": "new_name", "email": "<3db@3decibels.net>"}, "3decibels")])
def test_get_new_contributors_new_email(self):
contributors = [{"count": 1, "name": "3decibels", "email": "<3db@3decibels.com>"},
{"count": 585, "name": "Aaron Griffith", "email": "<aargri@gmail.com>"},
{"count": 1, "name": "Aaron1011", "email": "<aa1ronham@gmail.com>"}]
old_contributors = [{"name": "Andrew Brown", "email": "<brownan@gmail.com>"},
{"name": "Alejandro Aguilera", "email": "<fenixin@lavabit.com>"},
{"name": "3decibels", "email": "<3db@3decibels.net>"}]
new_contributors, new_alias, new_email = contrib.get_new_contributors(
contributors, old_contributors)
self.assertListEqual(
new_email, [({"count": 1, "name": "3decibels", "email": "<3db@3decibels.com>"}, "<3db@3decibels.net>")])
def test_merge_short_term_contributors(self):
new_contributors = [{"count": 1, "name": "Aaron1011", "email": "<aa1ronham@gmail.com>"}, {
"count": 585, "name": "Aaron Griffith", "email": "<aargri@gmail.com>"}]
expected = ['============',
'Contributors',
'============',
'',
'This file contains a list of every person who has contributed code to',
'Overviewer.',
'',
'---------------',
'Original Author',
'---------------',
'',
' * Andrew Brown <brownan@gmail.com>',
'',
'-------------------------',
'Long-term Contributions',
'-------------------------',
'',
'These contributors have made many changes, over a fairly long time span, or',
'for many different parts of the code.',
'',
' * Alejandro Aguilera <fenixin@lavabit.com>',
'',
'------------------------',
'Short-term Contributions',
'------------------------',
'',
'These contributors have made specific changes for a particular bug fix or',
'feature.',
'',
' * 3decibels <3db@3decibels.net>',
' * Aaron1011 <aa1ronham@gmail.com>\n',
' * Aaron Griffith <aargri@gmail.com>\n']
self.assertListEqual(contrib.merge_short_term_contributors(
self.contrib_file_lines, new_contributors), expected)
if __name__ == "__main__":
unittest.main()

test/test_cyrillic_convert.py

@@ -0,0 +1,11 @@
import unittest
import tempfile
from contrib.cyrillic_convert import convert
class TestCyrillicConvert(unittest.TestCase):
def test_convert(self):
gibberish = '{chunk: [-2, 0],y: 65,msg: "ðåëèãèè",x: -20,z: 4,type: "sign"}'
cyrillic = '{chunk: [-2, 0],y: 65,msg: "религии",x: -20,z: 4,type: "sign"}'
self.assertEqual(convert(gibberish), cyrillic)

test/test_playerInspect.py Normal file

@@ -0,0 +1,176 @@
import unittest
from io import StringIO
from pathlib import Path
from textwrap import dedent
from unittest.mock import patch, MagicMock
import contrib.playerInspect as player_inspect
class TestPlayerInspect(unittest.TestCase):
def setUp(self):
self.player_data = {
'AbsorptionAmount': 0.0,
'Air': 300,
'Attributes': [
{'Base': 20.0, 'Name': 'generic.maxHealth'},
{'Base': 0.0, 'Name': 'generic.knockbackResistance'},
{'Base': 0.10000000149011612, 'Name': 'generic.movementSpeed'},
{'Base': 0.0, 'Name': 'generic.armor'},
{'Base': 0.0, 'Name': 'generic.armorToughness'},
{'Base': 1.0, 'Name': 'generic.attackDamage'},
{'Base': 4.0, 'Name': 'generic.attackSpeed'},
{'Base': 0.0, 'Name': 'generic.luck'}
],
'DataVersion': 1631,
'DeathTime': 0,
'Dimension': 0,
'EnderItems': [],
'FallDistance': 0.0,
'FallFlying': 0,
'Fire': -20,
'Health': 20.0,
'HurtByTimestamp': 0,
'HurtTime': 0,
'Inventory': [{'Count': 1, 'Slot': -106, 'id': 'minecraft:sign'}],
'Invulnerable': 0,
'Motion': [0.0, -0.0784000015258789, 0.0],
'OnGround': 1,
'PortalCooldown': 0,
'Pos': [-96.11859857363737, 70.0, -44.17768261916891],
'Rotation': [-72.00011444091797, 38.250030517578125],
'Score': 0,
'SelectedItemSlot': 0,
'SleepTimer': 0,
'Sleeping': 0,
"SpawnX": 10,
"SpawnY": 52,
"SpawnZ": 10,
'UUIDLeast': -7312926203658200544,
'UUIDMost': 6651100054519957107,
'XpLevel': 0,
'XpP': 0.0,
'XpSeed': 0,
'XpTotal': 0,
'abilities': {
'flySpeed': 0.05000000074505806,
'flying': 0,
'instabuild': 1,
'invulnerable': 1,
'mayBuild': 1,
'mayfly': 1,
'walkSpeed': 0.10000000149011612
},
'foodExhaustionLevel': 0.0,
'foodLevel': 20,
'foodSaturationLevel': 5.0,
'foodTickTimer': 0,
'playerGameType': 1,
'recipeBook': {
'isFilteringCraftable': 0,
'isFurnaceFilteringCraftable': 0,
'isFurnaceGuiOpen': 0,
'isGuiOpen': 0,
'recipes': [],
'toBeDisplayed': []
},
'seenCredits': 0
}
@patch('sys.stdout', new_callable=StringIO)
def test_print_player(self, mock_stdout):
expected = "\n".join([
"Position:\t-96, 70, -44\t(dim: 0)",
"Spawn:\t\t10, 52, 10",
"Health:\t20\tLevel:\t\t0\t\tGameType:\t1",
"Food:\t20\tTotal XP:\t0",
"Inventory: 1 items",
" 1 minecraft:sign\n"])
player_inspect.print_player(self.player_data)
self.assertEqual(mock_stdout.getvalue(), expected)
@patch('sys.stdout', new_callable=StringIO)
def test_print_player_no_spawn(self, mock_stdout):
expected = "\n".join([
"Position:\t-96, 70, -44\t(dim: 0)",
"Health:\t20\tLevel:\t\t0\t\tGameType:\t1",
"Food:\t20\tTotal XP:\t0",
"Inventory: 1 items",
" 1 minecraft:sign\n"])
player_data = {
k: v for k, v in self.player_data.items()
if k not in("SpawnX", "SpawnY", "SpawnZ")
}
player_inspect.print_player(player_data)
self.assertEqual(mock_stdout.getvalue(), expected)
@patch('sys.stdout', new_callable=StringIO)
def test_print_player_sub_entry(self, mock_stdout):
expected = "\n".join([
"\tPosition:\t-96, 70, -44\t(dim: 0)",
"\tSpawn:\t\t10, 52, 10",
"\tHealth:\t20\tLevel:\t\t0\t\tGameType:\t1",
"\tFood:\t20\tTotal XP:\t0",
"\tInventory: 1 items\n"])
player_inspect.print_player(self.player_data, sub_entry=True)
self.assertEqual(mock_stdout.getvalue(), expected)
@patch('sys.stdout', new_callable=StringIO)
def test_print_player_sub_entry_no_spawn(self, mock_stdout):
expected = "\n".join([
"\tPosition:\t-96, 70, -44\t(dim: 0)",
"\tHealth:\t20\tLevel:\t\t0\t\tGameType:\t1",
"\tFood:\t20\tTotal XP:\t0",
"\tInventory: 1 items\n"])
player_data = {
k: v for k, v in self.player_data.items()
if k not in("SpawnX", "SpawnY", "SpawnZ")
}
player_inspect.print_player(player_data, sub_entry=True)
self.assertEqual(mock_stdout.getvalue(), expected)
def test_find_all_player_files(self):
dir_path = MagicMock(Path)
files = [Path('def0492d-0fe9-43ff-a3d5-8c3fc9160c94.dat'),
Path('074c808a-1f04-4bdd-8385-bd74601210a1.dat'),
Path('104e149d-a802-4a27-ac8f-ceab5279087c.dat')]
dir_path.iterdir.return_value = (f for f in files)
expected = [(Path('def0492d-0fe9-43ff-a3d5-8c3fc9160c94.dat'),
'def0492d-0fe9-43ff-a3d5-8c3fc9160c94'),
(Path('074c808a-1f04-4bdd-8385-bd74601210a1.dat'),
'074c808a-1f04-4bdd-8385-bd74601210a1'),
(Path('104e149d-a802-4a27-ac8f-ceab5279087c.dat'),
'104e149d-a802-4a27-ac8f-ceab5279087c')]
result = player_inspect.find_all_player_files(dir_path)
self.assertListEqual(list(result), expected)
def test_find_player_file(self):
dir_path = MagicMock(Path)
files = [Path('def0492d-0fe9-43ff-a3d5-8c3fc9160c94.dat'),
Path('074c808a-1f04-4bdd-8385-bd74601210a1.dat'),
Path('104e149d-a802-4a27-ac8f-ceab5279087c.dat')]
dir_path.iterdir.return_value = (f for f in files)
expected = (Path('104e149d-a802-4a27-ac8f-ceab5279087c.dat'),
'104e149d-a802-4a27-ac8f-ceab5279087c')
result = player_inspect.find_player_file(
dir_path, selected_player='104e149d-a802-4a27-ac8f-ceab5279087c')
self.assertEqual(result, expected)
def test_find_player_file_raises_when_selected_player_not_found(self):
dir_path = MagicMock(Path)
files = [Path('def0492d-0fe9-43ff-a3d5-8c3fc9160c94.dat'),
Path('104e149d-a802-4a27-ac8f-ceab5279087c.dat')]
dir_path.iterdir.return_value = (f for f in files)
with self.assertRaises(FileNotFoundError):
player_inspect.find_player_file(dir_path, selected_player='NON_EXISTENT_UUID')

test/test_regionTrimmer.py

@@ -0,0 +1,68 @@
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
import networkx
import contrib.regionTrimmer as region_trimmer
class TestRegionTrimmer(unittest.TestCase):
def test_get_nodes(self):
coords = [(0, 0), (0, -1), (-1, 0), (-1, -1)]
with TemporaryDirectory() as tmpdirname:
region_file = Path(tmpdirname)
for x, z in coords:
region_fname = "r.{x}.{z}.mca".format(x=x, z=z)
(region_file / region_fname).touch()
nodes = region_trimmer.get_nodes(region_file)
self.assertListEqual(sorted(nodes), sorted(coords))
def test_get_nodes_returns_empty_list_when_no_region_files(self):
with TemporaryDirectory() as tmpdirname:
region_file = Path(tmpdirname)
(region_file / "not_region_file.txt").touch()
nodes = region_trimmer.get_nodes(region_file)
self.assertListEqual(nodes, [])
def test_get_region_file_from_node(self):
node = (0, 0)
regionset_path = Path('/path/to/regions')
self.assertEqual(region_trimmer.get_region_file_from_node(
regionset_path, node), Path('/path/to/regions/r.0.0.mca'))
def test_get_graph_bounds(self):
""" Should return (max_x, min_x, max_z, min_z) of all nodes
"""
graph = networkx.Graph()
graph.add_nodes_from([(0, 0), (0, -1), (-1, 0), (-1, -1)])
self.assertEqual(region_trimmer.get_graph_bounds(graph), (0, -1, 0, -1))
def test_get_graph_center_by_bounds(self):
self.assertEqual(region_trimmer.get_graph_center_by_bounds((0, -1, 0, -1)), (-1, -1))
def test_generate_edges(self):
graph = networkx.Graph()
graph.add_nodes_from(
[(0, 0), (0, -1), (-1, 0), (-1, -1)]
)
graph = region_trimmer.generate_edges(graph)
self.assertEqual(
graph.adj,
{
(0, -1): {(0, 0): {}, (-1, -1): {}},
(0, 0): {
(0, -1): {},
(-1, 0): {},
(-1, -1): {},
},
(-1, 0): {(0, 0): {}, (-1, -1): {}},
(-1, -1): {
(0, -1): {},
(0, 0): {},
(-1, 0): {},
},
},
)

test/test_testRender.py Normal file

@@ -0,0 +1,108 @@
import tempfile
import unittest
from unittest.mock import patch
from subprocess import CalledProcessError, PIPE, STDOUT
import contrib.testRender as test_render
from io import StringIO
from shlex import split
class TestTestRender(unittest.TestCase):
@patch("contrib.testRender.run")
def test_check_call_raises_CalledProcessError_from_subprocess_run(self, m_run):
m_run.side_effect = CalledProcessError(1, "python program.js")
with self.assertRaises(CalledProcessError):
test_render.check_call(["python", "program.js"])
@patch("contrib.testRender.run")
def test_check_call_captures_stdout_if_not_verbose(self, m_run):
test_render.check_call(["python", "program.py"])
args, kwargs = m_run.call_args
self.assertEqual(kwargs['stdout'], PIPE)
self.assertEqual(kwargs['stderr'], STDOUT)
@patch("contrib.testRender.run")
def test_check_call_does_not_capture_stdout_if_verbose(self, m_run):
test_render.check_call(["python", "program.py"], verbose=True)
args, kwargs = m_run.call_args
self.assertEqual(kwargs['stdout'], None)
self.assertEqual(kwargs['stderr'], None)
@patch('sys.stdout', new_callable=StringIO)
@patch("contrib.testRender.run")
def test_check_call_prints_exception_output_if_verbose(self, m_run, m_out):
m_run.side_effect = CalledProcessError(
1, "python program.js", output="SyntaxError: invalid syntax")
with self.assertRaises(CalledProcessError):
test_render.check_call(["python", "program.js"], verbose=True)
self.assertEqual(m_out.getvalue().strip(), "SyntaxError: invalid syntax")
@patch("contrib.testRender.run")
def test_check_output_captures_stdout(self, m_run):
test_render.check_call(["python", "program.py"])
args, kwargs = m_run.call_args
self.assertEqual(kwargs['stdout'], PIPE)
@patch('contrib.testRender.check_output')
def test_get_commits(self, m_check_output):
gitrange = '2eca1a5fb5fa7eeb5494abb350cd535f67acfb8b..08a86a52abfabd59ac68b37dc7e5270bd7fb328a'
m_check_output.return_value = (
"commit 2eca1a5fb5fa7eeb5494abb350cd535f67acfb8b\nAuthor: Andrew "
"<andrew@fry.(none)>\nDate: Sun Aug 22 10:16:10 2010 -0400\n\n "
" initial comit\n\n:000000 100644 0000000 c398ada A\tchunk.py\n:000000 "
"100644 0000000 d5ee6ed A\tnbt.py\n:000000 100644 0000000 8fc65c9 A\ttextures.py\n:"
"000000 100644 0000000 6934326 A\tworld.py\n\ncommit 08a86a52abfabd59ac68b37dc7e5270bd7fb328a"
"\nAuthor: Andrew <andrew@fry.(none)>\nDate: Tue Aug 24 21:11:57 2010 -0400\n\n "
"uses multiprocessing to speed up rendering. Caches chunks\n\n:1"
)
result = list(test_render.get_commits(gitrange))
self.assertListEqual(result, ['2eca1a5fb5fa7eeb5494abb350cd535f67acfb8b',
'08a86a52abfabd59ac68b37dc7e5270bd7fb328a'])
@patch('contrib.testRender.check_output', return_value="my-feature-branch")
def test_get_current_branch(self, m_check_output):
self.assertEqual(test_render.get_current_branch(), "my-feature-branch")
@patch('contrib.testRender.check_output', return_value="HEAD")
def test_get_current_branch_returns_none_for_detached_head(self, m_check_output):
self.assertIsNone(test_render.get_current_branch())
@patch('contrib.testRender.check_output', return_value="3f1f3d748e1c79843279ba18ab65a34368b95b67")
def test_get_current_commit(self, m_check_output):
self.assertEqual(
test_render.get_current_commit(),
"3f1f3d748e1c79843279ba18ab65a34368b95b67"
)
@patch('contrib.testRender.get_current_branch', return_value="my-feature-branch")
def test_get_current_ref_returns_branch_name_if_possible(self, m_branch):
self.assertEqual(test_render.get_current_ref(), "my-feature-branch")
@patch('contrib.testRender.get_current_commit', return_value="3f1f3d748e1c79843279ba18ab65a34368b95b67")
@patch('contrib.testRender.get_current_branch', return_value=None)
def test_get_current_ref_returns_current_commit_if_no_branch(self, m_branch, m_commit):
self.assertEqual(
test_render.get_current_ref(),
"3f1f3d748e1c79843279ba18ab65a34368b95b67"
)
@patch('contrib.testRender.check_output')
def test_get_commits(self, m_check_output):
m_check_output.return_value = "\n".join(
[
"41ceaeab58473416bb79680ab21211764e6f1908",
"a4d0daa91c25a51ca95182301e503c020900dafe",
"05906c81f5778a543dfab14e77231db0a99bae24",
]
)
gitrange = "41ceaeab58473416bb79680ab21211764e6f1908..05906c81f5778a543dfab14e77231db0a99bae24"
result = list(test_render.get_commits(gitrange))
self.assertListEqual(
result,
[
"41ceaeab58473416bb79680ab21211764e6f1908",
"a4d0daa91c25a51ca95182301e503c020900dafe",
"05906c81f5778a543dfab14e77231db0a99bae24"
]
)