0

implement observer model

This commit is contained in:
aheadley
2012-03-16 22:26:16 -04:00
parent b1dba9518c
commit 46b04dd09a
3 changed files with 205 additions and 120 deletions

View File

@@ -393,32 +393,11 @@ dir but you forgot to put quotes around the directory, since it contains spaces.
# multiprocessing dispatcher # multiprocessing dispatcher
if config['processes'] == 1: if config['processes'] == 1:
dispatch = dispatcher.Dispatcher() dispatch = dispatcher.Dispatcher(observer=dispatcher.ProgressBarObserver)
else: else:
dispatch = dispatcher.MultiprocessingDispatcher(local_procs=config['processes']) dispatch = dispatcher.MultiprocessingDispatcher(local_procs=config['processes'],
last_status_print = time.time() observer=dispatcher.ProgressBarObserver)
def print_status(phase, completed, total): dispatch.render_all(tilesets)
# phase is ignored. it's always zero?
if (total == 0):
percent = 100
logging.info("Rendered %d of %d tiles. %d%% complete", completed, total, percent)
elif total == None:
logging.info("Rendered %d tiles.", completed)
else:
percent = int(100* completed/total)
logging.info("Rendered %d of %d. %d%% complete", completed, total, percent)
def update_pbar(phase, completed, total):
if total is None or total == 0:
print_status(phase, completed, total)
else:
pbar = progressbar.ProgressBar(
widgets=['Rendering: ', progressbar.FractionWidget(), ' (',
progressbar.Percentage(), ') ',
progressbar.Bar(left='[', right=']'), ' ', progressbar.ETA()],
maxval=total).start().update(completed)
dispatch.render_all(tilesets, update_pbar)
dispatch.close() dispatch.close()
assetMrg.finalize(tilesets) assetMrg.finalize(tilesets)

View File

@@ -20,6 +20,8 @@ import cPickle as pickle
import Queue import Queue
import time import time
import logging import logging
import progressbar
import sys
from signals import Signal from signals import Signal
@@ -30,7 +32,7 @@ class Dispatcher(object):
possible to create a Dispatcher that distributes this work to many possible to create a Dispatcher that distributes this work to many
worker processes. worker processes.
""" """
def __init__(self): def __init__(self, observer=None):
super(Dispatcher, self).__init__() super(Dispatcher, self).__init__()
# list of (tileset, workitem) tuples # list of (tileset, workitem) tuples
@@ -40,7 +42,9 @@ class Dispatcher(object):
# keeps track of jobs waiting to run after dependencies finish # keeps track of jobs waiting to run after dependencies finish
self._pending_jobs = [] self._pending_jobs = []
def render_all(self, tilesetlist, status_callback): self.observer_type = observer or LoggingObserver
def render_all(self, tilesetlist):
"""Render all of the tilesets in the given """Render all of the tilesets in the given
tilesetlist. status_callback is called periodically to update tilesetlist. status_callback is called periodically to update
status. The callback should take the following arguments: status. The callback should take the following arguments:
@@ -76,20 +80,32 @@ class Dispatcher(object):
total_jobs += jobs_for_tileset total_jobs += jobs_for_tileset
finished_jobs = 0 finished_jobs = 0
self.observer = self.observer_type(total_jobs)
# do the first status update # do the first status update
self._status_update(status_callback, phase, finished_jobs, total_jobs, force=True) #self._status_update(status_callback, phase, finished_jobs, total_jobs, force=True)
self.observer.start()
self._status_update(phase, finished_jobs, True)
# go through these iterators round-robin style # go through these iterators round-robin style
for tileset, (workitem, deps) in util.roundrobin(work_iterators): for tileset, (workitem, deps) in util.roundrobin(work_iterators):
self._pending_jobs.append((tileset, workitem, deps)) self._pending_jobs.append((tileset, workitem, deps))
finished_jobs += self._dispatch_jobs() finished_jobs += self._dispatch_jobs()
self._status_update(status_callback, phase, finished_jobs, total_jobs) self._status_update(phase, finished_jobs)
# after each phase, wait for the work to finish # after each phase, wait for the work to finish
while len(self._pending_jobs) > 0 or len(self._running_jobs) > 0: while len(self._pending_jobs) > 0 or len(self._running_jobs) > 0:
finished_jobs += self._dispatch_jobs() finished_jobs += self._dispatch_jobs()
self._status_update(status_callback, phase, finished_jobs, total_jobs) self._status_update(phase, finished_jobs)
self.observer.finish()
def _status_update(self, phase, completed, force=False):
if force or completed - self.observer.get_current_value() > \
self.observer.MIN_UPDATE_INTERVAL:
self.observer.update(completed)
"""
def _status_update(self, callback, phase, completed, total, force=False): def _status_update(self, callback, phase, completed, total, force=False):
# always called with force=True at the beginning, so that can # always called with force=True at the beginning, so that can
# be used to set up state. After that, it is called after # be used to set up state. After that, it is called after
@@ -109,6 +125,7 @@ class Dispatcher(object):
if self._last_status_update < 0 or completed >= self._last_status_update + update_interval or completed < self._last_status_update: if self._last_status_update < 0 or completed >= self._last_status_update + update_interval or completed < self._last_status_update:
self._last_status_update = completed self._last_status_update = completed
callback(phase, completed, total) callback(phase, completed, total)
"""
def _dispatch_jobs(self): def _dispatch_jobs(self):
# helper function to dispatch pending jobs when their # helper function to dispatch pending jobs when their
@@ -292,12 +309,12 @@ class MultiprocessingDispatcher(Dispatcher):
"""A subclass of Dispatcher that spawns worker processes and """A subclass of Dispatcher that spawns worker processes and
distributes jobs to them to speed up processing. distributes jobs to them to speed up processing.
""" """
def __init__(self, local_procs=-1, address=None, authkey=None): def __init__(self, local_procs=-1, address=None, authkey=None, observer=None):
"""Creates the dispatcher. local_procs should be the number of """Creates the dispatcher. local_procs should be the number of
worker processes to spawn. If it's omitted (or negative) worker processes to spawn. If it's omitted (or negative)
the number of available CPUs is used instead. the number of available CPUs is used instead.
""" """
super(MultiprocessingDispatcher, self).__init__() super(MultiprocessingDispatcher, self).__init__(observer=observer)
# automatic local_procs handling # automatic local_procs handling
if local_procs < 0: if local_procs < 0:
@@ -405,3 +422,91 @@ class MultiprocessingDispatcher(Dispatcher):
m.connect() m.connect()
p = MultiprocessingDispatcherProcess(m) p = MultiprocessingDispatcherProcess(m)
p.run() p.run()
class Observer(object):
    """Base class for render-progress observers.

    Tracks a current progress value against a maximum and records
    wall-clock start/end times. Subclasses override update() to report
    progress somewhere (a log, a terminal progress bar, ...).
    """

    # Minimum number of completed items between successive dispatcher-pushed
    # updates (the dispatcher compares progress deltas against this).
    MIN_UPDATE_INTERVAL = 100

    def __init__(self, max_value):
        # max_value may be None when the total job count is unknown.
        self._current_value = None
        self._max_value = max_value
        self.start_time = None
        self.end_time = None

    def start(self):
        """Mark the observer as started and reset progress to zero."""
        self.start_time = time.time()
        self.update(0)
        return self

    def is_started(self):
        return self.start_time is not None

    def finish(self):
        """Mark the observer as finished."""
        self.end_time = time.time()

    def is_finished(self):
        return self.end_time is not None

    def is_running(self):
        return self.is_started() and not self.is_finished()

    def add(self, amount):
        """Advance progress by amount. Requires start() to have been called
        first (otherwise the current value is still None)."""
        self.update(self.get_current_value() + amount)

    def update(self, current_value):
        """Set the current progress value. Subclasses hook here to report."""
        self._current_value = current_value

    def get_percentage(self):
        """Return completion as a float percentage; 100.0 when there is
        no work to do at all."""
        # BUG FIX: the original used "is 0" -- an identity test that only
        # works because CPython caches small ints. Use value equality.
        if self.get_max_value() == 0:
            return 100.0
        else:
            return self.get_current_value() * 100.0 / self.get_max_value()

    def get_current_value(self):
        return self._current_value

    def get_max_value(self):
        return self._max_value
class LoggingObserver(Observer):
    """Observer that reports render progress through the logging module."""

    def update(self, current_value):
        """Record the new value and emit an INFO-level progress message."""
        super(LoggingObserver, self).update(current_value)
        total = self.get_max_value()
        if total is None:
            # Total job count unknown: report only the running tally.
            logging.info("Rendered %d tiles.", self.get_current_value())
        else:
            logging.info("Rendered %d of %d. %d%% complete",
                         self.get_current_value(), total,
                         self.get_percentage())
# Default widget chain for ProgressBarObserver: label, fraction, percentage,
# a bracketed bar, and an ETA estimate.
# NOTE(review): these widget instances are module-level and therefore shared
# by every bar constructed with the default -- presumably only one progress
# bar is ever active at a time; verify against the dispatcher's usage.
default_widgets = ['Rendering: ', progressbar.FractionWidget(), ' (',
    progressbar.Percentage(), ') ', progressbar.Bar(left='[', right=']'),
    ' ', progressbar.ETA()]
class ProgressBarObserver(progressbar.ProgressBar):
    """Observer that draws a terminal progress bar.

    Duck-types the Observer interface (start/update/finish,
    get_current_value/get_max_value) on top of progressbar.ProgressBar
    instead of inheriting from Observer, so the dispatcher can use either
    interchangeably.
    """

    # Minimum number of completed items between dispatcher-pushed updates;
    # mirrors Observer.MIN_UPDATE_INTERVAL.
    MIN_UPDATE_INTERVAL = 100

    def __init__(self, max_value, widgets=default_widgets, term_width=None,
            fd=sys.stderr):
        # Translate the observer's max_value into ProgressBar's maxval.
        super(ProgressBarObserver, self).__init__(maxval=max_value,
            widgets=widgets, term_width=term_width, fd=fd)

    def is_started(self):
        # start_time is maintained by ProgressBar.start() -- presumably it
        # is None until then; TODO confirm for the progressbar version in use.
        return self.start_time is not None

    def finish(self):
        # NOTE(review): records _end_time (leading underscore) while
        # Observer.is_finished() reads end_time; since this class does not
        # inherit from Observer, the attribute appears otherwise unused here
        # -- confirm whether it was meant to be end_time.
        self._end_time = time.time()
        super(ProgressBarObserver, self).finish()

    def get_current_value(self):
        # currval is the ProgressBar-maintained progress counter.
        return self.currval

    def get_max_value(self):
        return self.maxval

View File

@@ -396,7 +396,8 @@ class TileSet(object):
# Yeah functional programming! # Yeah functional programming!
return { return {
0: lambda: self.dirtytree.count_all(), 0: lambda: self.dirtytree.count_all(),
1: lambda: None, # there is no good way to guess this, so just give the total count
1: lambda: (4**(self.treedepth+1)-1)/3,
2: lambda: self.dirtytree.count_all(), 2: lambda: self.dirtytree.count_all(),
}[self.options['renderchecks']]() }[self.options['renderchecks']]()