Merge branch 'master' of https://github.com/rmccue/Minecraft-Overviewer into rmccue-master
chunk.py (64 changed lines)
@@ -112,12 +112,38 @@ def iterate_chunkblocks(xoff,yoff):
 transparent_blocks = set([0, 6, 8, 9, 18, 20, 37, 38, 39, 40, 44, 50, 51, 52, 53,
     59, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 74, 75, 76, 77, 78, 79, 81, 83, 85])
 
-def render_and_save(chunkfile, cachedir, worldobj, cave=False, queue=None):
+def find_oldimage(chunkfile, cached, cave):
+    destdir, filename = os.path.split(chunkfile)
+    filename_split = filename.split(".")
+    blockid = ".".join(filename_split[1:3])
+
+    # Get the name of the existing image.
+    moredirs, dir2 = os.path.split(destdir)
+    dir1 = os.path.basename(moredirs)
+    cachename = '/'.join((dir1, dir2))
+
+    oldimg = oldimg_path = None
+    key = ".".join((blockid, "cave" if cave else "nocave"))
+    if key in cached[cachename]:
+        oldimg_path = cached[cachename][key]
+        _, oldimg = os.path.split(oldimg_path)
+        logging.debug("Found cached image {0}".format(oldimg))
+    return oldimg, oldimg_path
+
+def check_cache(chunkfile, oldimg):
+    try:
+        if oldimg[1] and os.path.getmtime(chunkfile) <= os.path.getmtime(oldimg[1]):
+            return True
+        return False
+    except OSError:
+        return False
+
+def render_and_save(chunkfile, cachedir, worldobj, oldimg, cave=False, queue=None):
     """Used as the entry point for the multiprocessing workers (since processes
     can't target bound methods) or to easily render and save one chunk
 
     Returns the image file location"""
-    a = ChunkRenderer(chunkfile, cachedir, worldobj, queue)
+    a = ChunkRenderer(chunkfile, cachedir, worldobj, oldimg, queue)
     try:
         return a.render_and_save(cave)
     except ChunkCorrupt:
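The new module-level helpers key the image cache by the chunk's two parent directory names and its block coordinates. Below is a minimal, self-contained sketch of how find_oldimage derives that key; the chunk path and cache path are hypothetical examples, and the img.<x>.<z>.<cave|nocave>.<hash>.png naming is taken from the cache files this commit already works with:

    import os
    import collections

    cached = collections.defaultdict(dict)

    # Hypothetical chunk file inside a Minecraft Alpha world directory.
    chunkfile = "/srv/world/1f/1p/c.w.-q.dat"

    destdir, filename = os.path.split(chunkfile)
    blockid = ".".join(filename.split(".")[1:3])      # "w.-q"

    moredirs, dir2 = os.path.split(destdir)
    dir1 = os.path.basename(moredirs)
    cachename = "/".join((dir1, dir2))                # "1f/1p"

    key = ".".join((blockid, "nocave"))               # "w.-q.nocave"

    # world.py pre-populates `cached` by walking the cache directory;
    # a hit for this chunk would look like:
    cached[cachename][key] = "/srv/cache/1f/1p/img.w.-q.nocave.abc123.png"
    print(cached[cachename][key])

check_cache then compares the chunk file's mtime with the cached image's mtime: if the image is at least as new as the chunk data, the render can be skipped entirely.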
@@ -140,7 +166,7 @@ class ChunkCorrupt(Exception):
     pass
 
 class ChunkRenderer(object):
-    def __init__(self, chunkfile, cachedir, worldobj, queue):
+    def __init__(self, chunkfile, cachedir, worldobj, oldimg, queue):
         """Make a new chunk renderer for the given chunkfile.
         chunkfile should be a full path to the .dat file to process
         cachedir is a directory to save the resulting chunk images to
@@ -169,6 +195,7 @@ class ChunkRenderer(object):
         moredirs, dir2 = os.path.split(destdir)
         _, dir1 = os.path.split(moredirs)
         self.cachedir = os.path.join(cachedir, dir1, dir2)
+        self.oldimg, self.oldimg_path = oldimg
 
 
         if self.world.useBiomeData:
@@ -300,36 +327,12 @@ class ChunkRenderer(object):
         self._digest = digest[:6]
         return self._digest
 
-    def find_oldimage(self, cave):
-        # Get the name of the existing image. No way to do this but to look at
-        # all the files
-        oldimg = oldimg_path = None
-        for filename in os.listdir(self.cachedir):
-            if filename.startswith("img.{0}.{1}.".format(self.blockid,
-                    "cave" if cave else "nocave")) and \
-                    filename.endswith(".png"):
-                oldimg = filename
-                oldimg_path = os.path.join(self.cachedir, oldimg)
-                break
-        return oldimg, oldimg_path
-
     def render_and_save(self, cave=False):
         """Render the chunk using chunk_render, and then save it to a file in
         the same directory as the source image. If the file already exists and
         is up to date, this method doesn't render anything.
         """
         blockid = self.blockid
 
-        oldimg, oldimg_path = self.find_oldimage(cave)
-
-        if oldimg:
-            # An image exists? Instead of checking the hash which is kinda
-            # expensive (for tens of thousands of chunks, yes it is) check if
-            # the mtime of the chunk file is newer than the mtime of oldimg
-            if os.path.getmtime(self.chunkfile) <= os.path.getmtime(oldimg_path):
-                # chunkfile is older than the image, don't even bother checking
-                # the hash
-                return oldimg_path
-
         # Reasons for the code to get to this point:
         # 1) An old image doesn't exist
@@ -347,18 +350,19 @@ class ChunkRenderer(object):
 
         dest_path = os.path.join(self.cachedir, dest_filename)
 
-        if oldimg:
-            if dest_filename == oldimg:
+        if self.oldimg:
+            if dest_filename == self.oldimg:
                 # There is an existing file, the chunk has a newer mtime, but the
                 # hashes match.
                 # Before we return it, update its mtime so the next round
                 # doesn't have to check the hash
                 os.utime(dest_path, None)
                 logging.debug("Using cached image")
                 return dest_path
             else:
                 # Remove old image for this chunk. Anything already existing is
                 # either corrupt or out of date
-                os.unlink(oldimg_path)
+                os.unlink(self.oldimg_path)
 
         # Render the chunk
         img = self.chunk_render(cave=cave)
world.py (45 changed lines)
@@ -21,6 +21,7 @@ import Queue
 import sys
 import logging
 import cPickle
+import collections
 
 import numpy
 
@@ -35,6 +36,7 @@ and for extracting information about available worlds
 """
 
 base36decode = functools.partial(int, base=36)
+cached = collections.defaultdict(dict)
 
 
 def _convert_coords(chunks):
@@ -85,6 +87,12 @@ def base36encode(number, alphabet='0123456789abcdefghijklmnopqrstuvwxyz'):
         return "-" + base36
     return base36
 
+class FakeAsyncResult:
+    def __init__(self, string):
+        self.string = string
+    def get(self):
+        return self.string
+
 class WorldRenderer(object):
     """Renders a world's worth of chunks.
     worlddir is the path to the minecraft world
@@ -108,6 +116,20 @@ class WorldRenderer(object):
 
         self.chunklist = chunklist
 
+        # In order to avoid having to look up the cache file names in
+        # ChunkRenderer, get them all and store them here
+        for root, dirnames, filenames in os.walk(cachedir):
+            for filename in filenames:
+                if not filename.endswith('.png'):
+                    continue
+                dirname, dir_b = os.path.split(root)
+                _, dir_a = os.path.split(dirname)
+                _, x, z, cave, _ = filename.split('.', 4)
+                dir = '/'.join((dir_a, dir_b))
+                bits = '.'.join((x, z, cave))
+                cached[dir][bits] = os.path.join(root, filename)
+
+
         # stores Points Of Interest to be mapped with markers
         # a list of dictionaries, see below for an example
         self.POI = []
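The walk above builds exactly the keys that chunk.find_oldimage later looks up: the two parent directory names joined with '/' and the image name's x, z, and cave components joined with '.'. A short sketch with a hypothetical cache entry (directory names and hash suffix are illustrative only):

    import os

    root = "/srv/cache/1f/1p"                 # hypothetical cache subdirectory
    filename = "img.w.-q.nocave.abc123.png"   # hypothetical cached image

    dirname, dir_b = os.path.split(root)
    _, dir_a = os.path.split(dirname)
    _, x, z, cave, _ = filename.split('.', 4)

    print('/'.join((dir_a, dir_b)))           # 1f/1p
    print('.'.join((x, z, cave)))             # w.-q.nocave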
@@ -273,13 +295,17 @@ class WorldRenderer(object):
         for i, (col, row, chunkfile) in enumerate(chunks):
             if inclusion_set and (col, row) not in inclusion_set:
                 # Skip rendering, just find where the existing image is
-                _, imgpath = chunk.ChunkRenderer(chunkfile,
-                        self.cachedir, self, q).find_oldimage(False)
+                _, imgpath = chunk.find_oldimage(chunkfile, cached, self.caves)
                 if imgpath:
                     results[(col, row)] = imgpath
                     continue
 
-            result = chunk.render_and_save(chunkfile, self.cachedir, self, cave=self.caves, queue=q)
+            oldimg = chunk.find_oldimage(chunkfile, cached, self.caves)
+            if chunk.check_cache(chunkfile, oldimg):
+                result = oldimg[1]
+            else:
+                result = chunk.render_and_save(chunkfile, self.cachedir, self, oldimg, queue=q)
 
             results[(col, row)] = result
             if i > 0:
                 try:
@@ -299,15 +325,18 @@ class WorldRenderer(object):
         for col, row, chunkfile in chunks:
             if inclusion_set and (col, row) not in inclusion_set:
                 # Skip rendering, just find where the existing image is
-                _, imgpath = chunk.ChunkRenderer(chunkfile,
-                        self.cachedir, self, q).find_oldimage(False)
+                _, imgpath = chunk.find_oldimage(chunkfile, cached, self.caves)
                 if imgpath:
                     results[(col, row)] = imgpath
                     continue
 
-            result = pool.apply_async(chunk.render_and_save,
-                    args=(chunkfile,self.cachedir,self),
-                    kwds=dict(cave=self.caves, queue=q))
+            oldimg = chunk.find_oldimage(chunkfile, cached, self.caves)
+            if chunk.check_cache(chunkfile, oldimg):
+                result = FakeAsyncResult(oldimg[1])
+            else:
+                result = pool.apply_async(chunk.render_and_save,
+                        args=(chunkfile,self.cachedir,self, oldimg),
+                        kwds=dict(cave=self.caves, queue=q))
             asyncresults.append((col, row, result))
 
         pool.close()
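FakeAsyncResult exists so the result-collection code can call .get() uniformly: pool.apply_async returns a multiprocessing AsyncResult whose .get() yields the worker's return value, and a cache hit is wrapped so it answers the same call. A minimal sketch of the idea; fake_render and the paths are placeholders, not functions from the commit:

    from multiprocessing import Pool

    class FakeAsyncResult:
        def __init__(self, string):
            self.string = string
        def get(self):
            return self.string

    def fake_render(path):
        # Stand-in for chunk.render_and_save, which returns the image path.
        return path

    if __name__ == "__main__":
        pool = Pool(2)
        results = [
            FakeAsyncResult("cache/1f/1p/img.w.-q.nocave.cached.png"),               # cache hit
            pool.apply_async(fake_render, ("cache/1f/1q/img.w.-p.nocave.new.png",)), # fresh render
        ]
        pool.close()
        pool.join()
        print([r.get() for r in results])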