
can now specify a chunk cache directory manually

Andrew Brown
2010-09-22 23:43:40 -04:00
parent 0c803608f3
commit c53070304f
3 changed files with 54 additions and 23 deletions
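
In practice this adds a --cachedir option to gmap.py, so the chunk images (an intermediate product that was previously always written next to the world's chunk files) can go to a separate directory. A hypothetical invocation, with illustrative paths:

    python gmap.py --cachedir /var/tmp/overviewer-cache ~/.minecraft/saves/World1 ~/www/tiles

If --cachedir is omitted, the cache defaults to the world directory, matching the old behavior.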

chunk.py

@@ -64,12 +64,12 @@ def get_skylight_array(level):
 transparent_blocks = set([0, 6, 8, 9, 18, 20, 37, 38, 39, 40, 50, 51, 52, 53,
     59, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 74, 75, 76, 77, 79, 83, 85])
 
-def render_and_save(chunkfile, cave=False):
+def render_and_save(chunkfile, cachedir, cave=False):
     """Used as the entry point for the multiprocessing workers (since processes
     can't target bound methods) or to easily render and save one chunk
     Returns the image file location"""
-    a = ChunkRenderer(chunkfile)
+    a = ChunkRenderer(chunkfile, cachedir)
     try:
         return a.render_and_save(cave)
     except Exception, e:
@@ -80,19 +80,38 @@ def render_and_save(chunkfile, cave=False):
         print
         print "You pressed Ctrl-C. Exiting..."
         # Raise an exception that is an instance of Exception. Unlike
-        # KeyboardInterrupt, that will kill the process instead of having it
-        # propagate the exception back to the parent process.
+        # KeyboardInterrupt, this will re-raise in the parent, killing the
+        # entire program, instead of this process dying and the parent waiting
+        # forever for it to finish.
         raise Exception()
 
 class ChunkRenderer(object):
-    def __init__(self, chunkfile):
+    def __init__(self, chunkfile, cachedir):
+        """Make a new chunk renderer for the given chunkfile.
+        chunkfile should be a full path to the .dat file to process
+        cachedir is a directory to save the resulting chunk images to
+        """
         if not os.path.exists(chunkfile):
             raise ValueError("Could not find chunkfile")
         self.chunkfile = chunkfile
         destdir, filename = os.path.split(self.chunkfile)
-        self.destdir = os.path.abspath(destdir)
         self.blockid = ".".join(filename.split(".")[1:3])
 
+        # Cachedir here is the base directory of the caches. We need to go 2
+        # levels deeper according to the chunk file. Get the last 2 components
+        # of destdir and use that
+        moredirs, dir2 = os.path.split(destdir)
+        _, dir1 = os.path.split(moredirs)
+        self.cachedir = os.path.join(cachedir, dir1, dir2)
+
+        if not os.path.exists(self.cachedir):
+            try:
+                os.makedirs(self.cachedir)
+            except OSError, e:
+                import errno
+                if e.errno != errno.EEXIST:
+                    raise
+
     def _load_level(self):
         """Loads and returns the level structure"""
         if not hasattr(self, "_level"):
@@ -127,12 +146,12 @@ class ChunkRenderer(object):
         # Get the name of the existing image. No way to do this but to look at
         # all the files
         oldimg = oldimg_path = None
-        for filename in os.listdir(self.destdir):
+        for filename in os.listdir(self.cachedir):
             if filename.startswith("img.{0}.{1}.".format(self.blockid,
                     "cave" if cave else "nocave")) and \
                     filename.endswith(".png"):
                 oldimg = filename
-                oldimg_path = os.path.join(self.destdir, oldimg)
+                oldimg_path = os.path.join(self.cachedir, oldimg)
                 break
 
         return oldimg, oldimg_path
@@ -141,7 +160,6 @@ class ChunkRenderer(object):
         the same directory as the source image. If the file already exists and
         is up to date, this method doesn't render anything.
         """
-        destdir = self.destdir
         blockid = self.blockid
         oldimg, oldimg_path = self._find_oldimage(cave)
@@ -169,7 +187,7 @@ class ChunkRenderer(object):
             self._hash_blockarray(),
         )
-        dest_path = os.path.join(destdir, dest_filename)
+        dest_path = os.path.join(self.cachedir, dest_filename)
 
         if oldimg:
             if dest_filename == oldimg:
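
For reference, the cache layout logic introduced in __init__ above can be exercised on its own. A minimal sketch, not part of this commit; the sample path is hypothetical (Alpha-format chunk files sit two directory levels below the world root, e.g. <world>/1q/2b/c.1q.2b.dat):

    import errno
    import os

    def chunk_cache_path(chunkfile, cachedir):
        # Mirror the last two path components of the chunk file under cachedir
        destdir, _ = os.path.split(chunkfile)
        moredirs, dir2 = os.path.split(destdir)
        _, dir1 = os.path.split(moredirs)
        target = os.path.join(cachedir, dir1, dir2)
        try:
            # A concurrent worker may have created it already; that is fine
            os.makedirs(target)
        except OSError, e:
            if e.errno != errno.EEXIST:
                raise
        return target

    print chunk_cache_path("/worlds/World1/1q/2b/c.1q.2b.dat", "/tmp/cache")
    # prints: /tmp/cache/1q/2b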

gmap.py

@@ -28,7 +28,7 @@ import world
 import quadtree
 
 helptext = """
-%prog [-p PROCS] [-d] <World # / Path to World> <tiles dest dir>
+%prog [OPTIONS] <World # / Path to World> <tiles dest dir>
 """
 
 def main():
@@ -40,6 +40,7 @@ def main():
parser.add_option("-p", "--processes", dest="procs", help="How many chunks to render in parallel. A good number for this is the number of cores in your computer. Default %s" % cpus, default=cpus, action="store", type="int") parser.add_option("-p", "--processes", dest="procs", help="How many chunks to render in parallel. A good number for this is the number of cores in your computer. Default %s" % cpus, default=cpus, action="store", type="int")
parser.add_option("-z", "--zoom", dest="zoom", help="Sets the zoom level manually instead of calculating it. This can be useful if you have outlier chunks that make your world too big. This value will make the highest zoom level contain (2**ZOOM)^2 tiles", action="store", type="int") parser.add_option("-z", "--zoom", dest="zoom", help="Sets the zoom level manually instead of calculating it. This can be useful if you have outlier chunks that make your world too big. This value will make the highest zoom level contain (2**ZOOM)^2 tiles", action="store", type="int")
parser.add_option("-d", "--delete", dest="delete", help="Clear all caches. Next time you render your world, it will have to start completely over again. This is probably not a good idea for large worlds. Use this if you change texture packs and want to re-render everything.", action="store_true") parser.add_option("-d", "--delete", dest="delete", help="Clear all caches. Next time you render your world, it will have to start completely over again. This is probably not a good idea for large worlds. Use this if you change texture packs and want to re-render everything.", action="store_true")
parser.add_option("--cachedir", dest="cachedir", help="Sets the directory where the Overviewer will save chunk images, which is an intermediate step before the tiles are generated. You must use the same directory each time to gain any benefit from the cache. If not set, this defaults to your world directory.")
options, args = parser.parse_args() options, args = parser.parse_args()
@@ -59,15 +60,21 @@ def main():
         parser.print_help()
         sys.exit(1)
 
+    if not options.cachedir:
+        cachedir = worlddir
+    else:
+        cachedir = options.cachedir
     if len(args) != 2:
+        if options.delete:
+            return delete_all(cachedir, None)
         parser.error("Where do you want to save the tiles?")
     destdir = args[1]
 
     if options.delete:
-        return delete_all(worlddir, destdir)
+        return delete_all(cachedir, destdir)
 
     # First generate the world's chunk images
-    w = world.WorldRenderer(worlddir)
+    w = world.WorldRenderer(worlddir, cachedir)
     w.go(options.procs)
 
     # Now generate the tiles
@@ -87,12 +94,13 @@ def delete_all(worlddir, tiledir):
             os.unlink(filepath)
 
     # Now delete all /hash/ files in the tile dir.
-    for dirpath, dirnames, filenames in os.walk(tiledir):
-        for f in filenames:
-            if f.endswith(".hash"):
-                filepath = os.path.join(dirpath, f)
-                print "Deleting {0}".format(filepath)
-                os.unlink(filepath)
+    if tiledir:
+        for dirpath, dirnames, filenames in os.walk(tiledir):
+            for f in filenames:
+                if f.endswith(".hash"):
+                    filepath = os.path.join(dirpath, f)
+                    print "Deleting {0}".format(filepath)
+                    os.unlink(filepath)
 
 def list_worlds():
     "Prints out a brief summary of saves found in the default directory"

world.py

@@ -58,10 +58,14 @@ def _convert_coords(chunks):
     return mincol, maxcol, minrow, maxrow, chunks_translated
 
 class WorldRenderer(object):
-    """Renders a world's worth of chunks"""
-    def __init__(self, worlddir):
+    """Renders a world's worth of chunks.
+    worlddir is the path to the minecraft world
+    cachedir is the path to a directory that should hold the resulting images.
+    It may be the same as worlddir (which used to be the default)"""
+    def __init__(self, worlddir, cachedir):
         self.worlddir = worlddir
         self.caves = False
+        self.cachedir = cachedir
 
     def go(self, procs):
         """Starts the render. This returns when it is finished"""
@@ -109,7 +113,7 @@ class WorldRenderer(object):
             # Skip the multiprocessing stuff
             print "Rendering chunks synchronously since you requested 1 process"
             for i, (col, row, chunkfile) in enumerate(chunks):
-                result = chunk.render_and_save(chunkfile, cave=self.caves)
+                result = chunk.render_and_save(chunkfile, self.cachedir, cave=self.caves)
                 results[(col, row)] = result
                 if i > 0:
                     if 1000 % i == 0 or i % 1000 == 0:
@@ -119,7 +123,8 @@ class WorldRenderer(object):
             pool = multiprocessing.Pool(processes=processes)
             asyncresults = []
             for col, row, chunkfile in chunks:
-                result = pool.apply_async(chunk.render_and_save, args=(chunkfile,),
+                result = pool.apply_async(chunk.render_and_save,
+                        args=(chunkfile,self.cachedir),
                         kwds=dict(cave=self.caves))
                 asyncresults.append((col, row, result))
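
A note on the worker pattern shared between chunk.py and this pool: pool.apply_async returns an AsyncResult, and an ordinary Exception raised inside the worker is pickled and re-raised in the parent when .get() is called, which is why the commit converts KeyboardInterrupt into a plain Exception rather than letting it escape. A standalone sketch of that round trip, with illustrative names:

    import multiprocessing

    def worker(n):
        try:
            return n * n
        except KeyboardInterrupt:
            # Re-raised in the parent at .get() time; a bare KeyboardInterrupt
            # here would only kill this worker and leave the parent waiting
            raise Exception("worker interrupted")

    if __name__ == "__main__":
        pool = multiprocessing.Pool(processes=2)
        asyncresults = [pool.apply_async(worker, args=(n,)) for n in range(4)]
        print [r.get() for r in asyncresults]  # raises if any worker failed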