From d637ddbbe105d2e4dc17f3cf158bf1c86d0a13e5 Mon Sep 17 00:00:00 2001
From: Andrew Brown
Date: Mon, 27 Sep 2010 00:52:11 -0400
Subject: [PATCH] added the option to specify a list of chunks to update.

---
 README.rst | 12 +++++++++
 chunk.py   |  4 +--
 gmap.py    |  9 ++++++-
 world.py   | 76 ++++++++++++++++++++++++++++++++++++++++++++++++++----
 4 files changed, 93 insertions(+), 8 deletions(-)

diff --git a/README.rst b/README.rst
index 39ba237..007de92 100644
--- a/README.rst
+++ b/README.rst
@@ -172,6 +172,18 @@ Options
     significantly longer, since it is having to re-generate the files you just
     deleted.
 
+--chunklist=CHUNKLIST
+    Use this option to manually specify a list of chunks to consider for
+    updating. Without this option, every chunk is checked and, if necessary,
+    re-rendered. If this option points to a file containing, one per line,
+    the path to a chunk data file, then only the chunks in that list will be
+    considered for update.
+
+    It's up to you to build such a list. On Linux or Mac, try using the "find"
+    command. You could, for example, output all chunk files that are newer
+    than a certain date. Or you can incrementally update your map by passing
+    in a subset of chunks each time. It's up to you!
+
 Viewing the Results
 -------------------
 Within the output directory you will find two things: an index.html file, and a
diff --git a/chunk.py b/chunk.py
index d8ca70c..c548afa 100644
--- a/chunk.py
+++ b/chunk.py
@@ -142,7 +142,7 @@ class ChunkRenderer(object):
             self._digest = digest[:6]
         return self._digest
 
-    def _find_oldimage(self, cave):
+    def find_oldimage(self, cave):
         # Get the name of the existing image. No way to do this but to look at
         # all the files
         oldimg = oldimg_path = None
@@ -162,7 +162,7 @@ class ChunkRenderer(object):
         """
         blockid = self.blockid
 
-        oldimg, oldimg_path = self._find_oldimage(cave)
+        oldimg, oldimg_path = self.find_oldimage(cave)
 
         if oldimg:
             # An image exists? Instead of checking the hash which is kinda
diff --git a/gmap.py b/gmap.py
index 22048d2..97eeb97 100755
--- a/gmap.py
+++ b/gmap.py
@@ -45,6 +45,7 @@ def main():
     parser.add_option("-z", "--zoom", dest="zoom", help="Sets the zoom level manually instead of calculating it. This can be useful if you have outlier chunks that make your world too big. This value will make the highest zoom level contain (2**ZOOM)^2 tiles", action="store", type="int")
     parser.add_option("-d", "--delete", dest="delete", help="Clear all caches. Next time you render your world, it will have to start completely over again. This is probably not a good idea for large worlds. Use this if you change texture packs and want to re-render everything.", action="store_true")
     parser.add_option("--cachedir", dest="cachedir", help="Sets the directory where the Overviewer will save chunk images, which is an intermediate step before the tiles are generated. You must use the same directory each time to gain any benefit from the cache. If not set, this defaults to your world directory.")
+    parser.add_option("--chunklist", dest="chunklist", help="A file containing, on each line, a path to a chunkfile to update. Instead of scanning the world directory for chunks, it will just use this list. Normal caching rules still apply.")
 
     options, args = parser.parse_args()
 
@@ -78,8 +79,14 @@ def main():
 
     if options.delete:
         return delete_all(cachedir, destdir)
+
+    if options.chunklist:
+        chunklist = open(options.chunklist, 'r')
+    else:
+        chunklist = None
+
     # First generate the world's chunk images
-    w = world.WorldRenderer(worlddir, cachedir)
+    w = world.WorldRenderer(worlddir, cachedir, chunklist=chunklist)
     w.go(options.procs)
 
     # Now generate the tiles
diff --git a/world.py b/world.py
index 8e44635..131cfce 100644
--- a/world.py
+++ b/world.py
@@ -17,8 +17,9 @@ import functools
 import os
 import os.path
 import multiprocessing
-import numpy
+import sys
 
+import numpy
 from PIL import Image
 
 import chunk
@@ -85,16 +86,51 @@ class WorldRenderer(object):
     """Renders a world's worth of chunks.
     worlddir is the path to the minecraft world
     cachedir is the path to a directory that should hold the resulting images.
-    It may be the same as worlddir (which used to be the default)"""
-    def __init__(self, worlddir, cachedir):
+    It may be the same as worlddir (which used to be the default).
+
+    If chunklist is given, it is assumed to be an iterator over paths to chunk
+    files to update. If a path includes a trailing newline, it is stripped, so
+    you can pass in file handles just fine.
+    """
+    def __init__(self, worlddir, cachedir, chunklist=None):
         self.worlddir = worlddir
         self.caves = False
         self.cachedir = cachedir
+        self.chunklist = chunklist
+
         # stores Points Of Interest to be mapped with markers
         # a list of dictionaries, see below for an example
         self.POI = []
 
+    def _get_chunk_renderset(self):
+        """Returns a set of (col, row) chunks that should be rendered. Returns
+        None if all chunks should be rendered"""
+        if not self.chunklist:
+            return None
+
+        # Get a list of (chunkx, chunky, filename) tuples from the passed-in
+        # list of filenames
+        chunklist = []
+        for path in self.chunklist:
+            if path.endswith("\n"):
+                path = path[:-1]
+            f = os.path.basename(path)
+            if f and f.startswith("c.") and f.endswith(".dat"):
+                p = f.split(".")
+                chunklist.append((base36decode(p[1]), base36decode(p[2]),
+                    path))
+
+        # Translate to col, row coordinates
+        _, _, _, _, chunklist = _convert_coords(chunklist)
+
+        # Build a set from the col, row pairs
+        inclusion_set = set()
+        for col, row, filename in chunklist:
+            inclusion_set.add((col, row))
+
+        return inclusion_set
+
     def findTrueSpawn(self):
         """Adds the true spawn location to self.POI. The spawn Y coordinate is
         almost always the default of 64. Find the first air block above
@@ -157,8 +193,12 @@ class WorldRenderer(object):
 
         Returns a list of (chunkx, chunky, filename) where chunkx and chunky are
         given in chunk coordinates. Use convert_coords() to turn the resulting list
-        into an oblique coordinate system"""
+        into an oblique coordinate system.
+
+        Usually this scans the given worlddir, but will use the chunk list
+        given to the constructor if one was provided."""
         all_chunks = []
+
         for dirpath, dirnames, filenames in os.walk(self.worlddir):
             if not dirnames and filenames:
                 for f in filenames:
@@ -166,6 +206,10 @@ class WorldRenderer(object):
                         p = f.split(".")
                         all_chunks.append((base36decode(p[1]), base36decode(p[2]),
                             os.path.join(dirpath, f)))
+
+        if not all_chunks:
+            print "Error: No chunks found!"
+            sys.exit(1)
         return all_chunks
 
     def _render_chunks_async(self, chunks, processes):
@@ -176,11 +220,25 @@ class WorldRenderer(object):
         Returns a dictionary mapping (col, row) to the file where that chunk
         is rendered as an image
         """
+        # The set of chunks to render, or None for all of them. The logic is
+        # slightly more complicated than it might seem, since we still need to
+        # build the results dict out of all chunks, even if they're not being
+        # rendered.
+        inclusion_set = self._get_chunk_renderset()
+
         results = {}
         if processes == 1:
             # Skip the multiprocessing stuff
             print "Rendering chunks synchronously since you requested 1 process"
             for i, (col, row, chunkfile) in enumerate(chunks):
+                if inclusion_set and (col, row) not in inclusion_set:
+                    # Skip rendering, just find where the existing image is
+                    _, imgpath = chunk.ChunkRenderer(chunkfile,
+                            self.cachedir).find_oldimage(False)
+                    if imgpath:
+                        results[(col, row)] = imgpath
+                    continue
+
                 result = chunk.render_and_save(chunkfile, self.cachedir, cave=self.caves)
                 results[(col, row)] = result
                 if i > 0:
@@ -191,6 +249,14 @@ class WorldRenderer(object):
         pool = multiprocessing.Pool(processes=processes)
         asyncresults = []
         for col, row, chunkfile in chunks:
+            if inclusion_set and (col, row) not in inclusion_set:
+                # Skip rendering, just find where the existing image is
+                _, imgpath = chunk.ChunkRenderer(chunkfile,
+                        self.cachedir).find_oldimage(False)
+                if imgpath:
+                    results[(col, row)] = imgpath
+                continue
+
             result = pool.apply_async(chunk.render_and_save,
                     args=(chunkfile,self.cachedir),
                     kwds=dict(cave=self.caves))
@@ -202,7 +268,7 @@ class WorldRenderer(object):
             results[(col, row)] = result.get()
            if i > 0:
                 if 1000 % i == 0 or i % 1000 == 0:
-                    print "{0}/{1} chunks rendered".format(i, len(chunks))
+                    print "{0}/{1} chunks rendered".format(i, len(asyncresults))
         pool.join()
 
         print "Done!"
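
A note on building the chunk list: the README text above suggests using "find" on Linux or Mac. Below is a minimal Python sketch of the same idea; the world path, the chunks.txt output name, and the 24-hour cutoff are placeholder assumptions, not anything this patch defines:

    # Sketch: collect recently modified chunk files into a list for --chunklist.
    # The world path, output filename, and cutoff below are illustrative only.
    import os
    import time

    worlddir = "/path/to/world"          # assumed Minecraft world directory
    cutoff = time.time() - 24 * 60 * 60  # chunks touched in the last day

    out = open("chunks.txt", "w")
    for dirpath, dirnames, filenames in os.walk(worlddir):
        for f in filenames:
            # Chunk data files are named c.<x>.<z>.dat
            if f.startswith("c.") and f.endswith(".dat"):
                path = os.path.join(dirpath, f)
                if os.path.getmtime(path) >= cutoff:
                    out.write(path + "\n")
    out.close()

The resulting file can then be passed as --chunklist=chunks.txt when running gmap.py; as the new help text says, normal caching rules still apply. The same thing works programmatically, since WorldRenderer now accepts any iterator of paths, and an open file handle is fine because trailing newlines are stripped.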
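
As for what goes in that list: each line is a path to a chunk data file named c.<x>.<z>.dat, where the two middle components are base-36 chunk coordinates. _get_chunk_renderset() decodes them and runs the result through _convert_coords() to obtain the (col, row) keys used for the inclusion set. A small, self-contained sketch of just the filename parsing, with a made-up example path and int(s, 36) standing in for the module's base36decode helper:

    import os

    path = "/path/to/world/1a/12/c.1a.-q.dat"   # made-up example path
    f = os.path.basename(path)                  # "c.1a.-q.dat"
    p = f.split(".")                            # ["c", "1a", "-q", "dat"]
    chunkx = int(p[1], 36)                      # 46
    chunky = int(p[2], 36)                      # -26
    assert (chunkx, chunky) == (46, -26)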