[yt-svn] commit/yt: 6 new changesets

Bitbucket commits-noreply at bitbucket.org
Fri Nov 9 13:24:46 PST 2012


6 new commits in yt:


https://bitbucket.org/yt_analysis/yt/changeset/426c98880ea9/
changeset:   426c98880ea9
branch:      yt
user:        sskory
date:        2012-11-08 00:30:44
summary:     Added tag hop callback for changeset f15825659f5a
affected #:  1 file

diff -r f15825659f5af3ce64aaad30062aff3603cbfb66 -r 426c98880ea9f68c3043347e7b5d2865c8d26b4c .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5152,3 +5152,4 @@
 0000000000000000000000000000000000000000 svn.993
 fff7118f00e25731ccf37cba3082b8fcb73cf90e svn.371
 0000000000000000000000000000000000000000 svn.371
+f15825659f5af3ce64aaad30062aff3603cbfb66 hop callback



https://bitbucket.org/yt_analysis/yt/changeset/b7d78d603839/
changeset:   b7d78d603839
branch:      yt
user:        sskory
date:        2012-11-09 22:08:57
summary:     Merging.
affected #:  19 files

diff -r 426c98880ea9f68c3043347e7b5d2865c8d26b4c -r b7d78d603839d1a51ac0ecb4f54c67552f05c9cc yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -84,6 +84,7 @@
     ValidateSpatial, \
     ValidateGridType, \
     add_field, \
+    add_grad, \
     derived_field
 
 from particle_trajectories import \


diff -r 426c98880ea9f68c3043347e7b5d2865c8d26b4c -r b7d78d603839d1a51ac0ecb4f54c67552f05c9cc yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -58,6 +58,66 @@
                 return function
             return create_function
         self[name] = DerivedField(name, function, **kwargs)
+        
+    def add_grad(self, field, **kwargs):
+        """
+        Creates partial-derivative fields of a given field along x, y, and
+        z, plus the gradient magnitude.  The names of the new fields are
+        autogenerated as Grad_<field>_<axis> and Grad_<field>.
+
+        """
+        sl = slice(2,None,None)
+        sr = slice(None,-2,None)
+        
+        def _gradx(f, data):
+            grad = data[field][sl,1:-1,1:-1] - data[field][sr,1:-1,1:-1]
+            grad /= 2.0*data["dx"].flat[0]
+            g = np.zeros(data[field].shape, dtype='float64')
+            g[1:-1,1:-1,1:-1] = grad
+            return g
+            
+        def _grady(f, data):
+            grad = data[field][1:-1,sl,1:-1] - data[field][1:-1,sr,1:-1]
+            grad /= 2.0*data["dy"].flat[0]
+            g = np.zeros(data[field].shape, dtype='float64')
+            g[1:-1,1:-1,1:-1] = grad
+            return g
+            
+        def _gradz(f, data):
+            grad = data[field][1:-1,1:-1,sl] - data[field][1:-1,1:-1,sr]
+            grad /= 2.0*data["dz"].flat[0]
+            g = np.zeros(data[field].shape, dtype='float64')
+            g[1:-1,1:-1,1:-1] = grad
+            return g
+        
+        d_kwargs = kwargs.copy()
+        if "display_name" in kwargs: del d_kwargs["display_name"]
+        
+        for ax in "xyz":
+            if "display_name" in kwargs:
+                disp_name = r"%s\_%s" % (kwargs["display_name"], ax)
+            else:
+                disp_name = r"\partial %s/\partial %s" % (field, ax)
+            name = "Grad_%s_%s" % (field, ax)
+            self[name] = DerivedField(name, function=eval('_grad%s' % ax),
+                         take_log=False, validators=[ValidateSpatial(1,[field])],
+                         display_name = disp_name, **d_kwargs)
+        
+        def _grad(f, data) :
+            a = np.power(data["Grad_%s_x" % field],2)
+            b = np.power(data["Grad_%s_y" % field],2)
+            c = np.power(data["Grad_%s_z" % field],2)
+            norm = np.sqrt(a+b+c)
+            return norm
+
+        if "display_name" in kwargs:
+            disp_name = kwargs["display_name"]
+        else:
+            disp_name = r"\Vert\nabla %s\Vert" % (field)   
+        name = "Grad_%s" % field           
+        self[name] = DerivedField(name, function=_grad, take_log=False,
+                                  display_name = disp_name, **d_kwargs)
+        mylog.info("Added new fields: Grad_%s_x, Grad_%s_y, Grad_%s_z, Grad_%s" \
+                   % (field, field, field, field))
 
     def has_key(self, key):
         # This gets used a lot
@@ -96,6 +156,7 @@
 
 FieldInfo = FieldInfoContainer()
 add_field = FieldInfo.add_field
+add_grad = FieldInfo.add_grad
 
 def derived_field(**kwargs):
     def inner_decorator(function):
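
A minimal usage sketch of the new add_grad entry point (not part of the
commit; the dataset path is hypothetical, and any loaded pf would do):

    from yt.mods import *                      # exports add_grad after this merge
    pf = load("DD0010/moving7_0010")           # hypothetical dataset
    add_grad("Density")    # registers Grad_Density_{x,y,z} and Grad_Density
    dd = pf.h.all_data()
    print dd["Grad_Density"].max()             # gradient-magnitude field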


diff -r 426c98880ea9f68c3043347e7b5d2865c8d26b4c -r b7d78d603839d1a51ac0ecb4f54c67552f05c9cc yt/data_objects/hierarchy.py
--- a/yt/data_objects/hierarchy.py
+++ b/yt/data_objects/hierarchy.py
@@ -382,17 +382,19 @@
         """
         Prints out (stdout) relevant information about the simulation
         """
-        header = "%3s\t%6s\t%14s" % ("level","# grids", "# cells")
+        header = "%3s\t%6s\t%14s\t%14s" % ("level","# grids", "# cells",
+                                           "# cells^3")
         print header
         print "%s" % (len(header.expandtabs())*"-")
         for level in xrange(MAXLEVEL):
             if (self.level_stats['numgrids'][level]) == 0:
                 break
-            print "% 3i\t% 6i\t% 14i" % \
+            print "% 3i\t% 6i\t% 14i\t% 14i" % \
                   (level, self.level_stats['numgrids'][level],
-                   self.level_stats['numcells'][level])
+                   self.level_stats['numcells'][level],
+                   self.level_stats['numcells'][level]**(1./3))
             dx = self.select_grids(level)[0].dds[0]
-        print "-" * 28
+        print "-" * 46
         print "   \t% 6i\t% 14i" % (self.level_stats['numgrids'].sum(), self.level_stats['numcells'].sum())
         print "\n"
         try:


diff -r 426c98880ea9f68c3043347e7b5d2865c8d26b4c -r b7d78d603839d1a51ac0ecb4f54c67552f05c9cc yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -478,7 +478,7 @@
         self.parameters['TopGridRank'] = 3
         self.parameters['DomainLeftEdge'] = np.zeros(self.parameters['TopGridRank'])
         self.parameters['DomainRightEdge'] = np.ones(self.parameters['TopGridRank'])
-        self.parameters['Refineby'] = 2 # technically not the enzo default
+        self.parameters['RefineBy'] = 2 # technically not the enzo default
         self.parameters['StopCycle'] = 100000
         self.parameters['dtDataDump'] = 0.
         self.parameters['CycleSkipDataDump'] = 0.


diff -r 426c98880ea9f68c3043347e7b5d2865c8d26b4c -r b7d78d603839d1a51ac0ecb4f54c67552f05c9cc yt/frontends/stream/api.py
--- a/yt/frontends/stream/api.py
+++ b/yt/frontends/stream/api.py
@@ -29,7 +29,9 @@
       StreamHierarchy, \
       StreamStaticOutput, \
       StreamHandler, \
-      load_uniform_grid
+      load_uniform_grid, \
+      load_amr_grids, \
+      refine_amr
 
 from .fields import \
       KnownStreamFields, \


diff -r 426c98880ea9f68c3043347e7b5d2865c8d26b4c -r b7d78d603839d1a51ac0ecb4f54c67552f05c9cc yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -44,6 +44,8 @@
     decompose_array, get_psize
 from yt.utilities.definitions import \
     mpc_conversion, sec_conversion
+from yt.utilities.flagging_methods import \
+    FlaggingGrid
 
 from .fields import \
     StreamFieldInfo, \
@@ -395,3 +397,168 @@
     for unit in mpc_conversion.keys():
         spf.units[unit] = mpc_conversion[unit] * box_in_mpc
     return spf
+
+def load_amr_grids(grid_data, domain_dimensions, sim_unit_to_cm, bbox=None,
+                   sim_time=0.0, number_of_particles=0):
+    r"""Load a set of grids of data into yt as a
+    :class:`~yt.frontends.stream.data_structures.StreamHandler`.
+
+    This should allow a sequence of grids of varying resolution of data to be
+    loaded directly into yt and analyzed as would any others.  This comes with
+    several caveats:
+        * Units will be incorrect unless the data has already been converted to
+          cgs.
+        * Some functions may behave oddly, and parallelism will be
+          disappointing or non-existent in most cases.
+        * Particles may be difficult to integrate.
+        * No consistency checks are performed on the hierarchy
+
+    Parameters
+    ----------
+    grid_data : list of dicts
+        This is a list of dicts.  Each dict must have entries "left_edge",
+        "right_edge", "dimensions", "level", and then any remaining entries are
+        assumed to be fields.  This will be modified in place and can't be
+        assumed to be static.
+    domain_dimensions : array_like
+        This is the domain dimensions of the grid
+    sim_unit_to_cm : float
+        Conversion factor from simulation units to centimeters
+    bbox : array_like (xdim:zdim, LE:RE), optional
+        Size of the computational domain, in units of sim_unit_to_cm
+    sim_time : float, optional
+        The simulation time in seconds
+    number_of_particles : int, optional
+        If particle fields are included, set this to the number of particles
+
+    Examples
+    --------
+
+    >>> grid_data = [
+    ...     dict(left_edge = [0.0, 0.0, 0.0],
+    ...          right_edge = [1.0, 1.0, 1.],
+    ...          level = 0,
+    ...          dimensions = [32, 32, 32]),
+    ...     dict(left_edge = [0.25, 0.25, 0.25],
+    ...          right_edge = [0.75, 0.75, 0.75],
+    ...          level = 1,
+    ...          dimensions = [32, 32, 32])
+    ... ]
+    ... 
+    >>> for g in grid_data:
+    ...     g["Density"] = np.random.random(g["dimensions"]) * 2**g["level"]
+    ...
+    >>> pf = load_amr_grids(grid_data, [32, 32, 32], 1.0)
+    """
+
+    domain_dimensions = np.array(domain_dimensions)
+    ngrids = len(grid_data)
+    if bbox is None:
+        bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], 'float64')
+    domain_left_edge = np.array(bbox[:, 0], 'float64')
+    domain_right_edge = np.array(bbox[:, 1], 'float64')
+    grid_levels = np.zeros((ngrids, 1), dtype='int32')
+    grid_left_edges = np.zeros((ngrids, 3), dtype="float32")
+    grid_right_edges = np.zeros((ngrids, 3), dtype="float32")
+    grid_dimensions = np.zeros((ngrids, 3), dtype="int32")
+    sfh = StreamDictFieldHandler()
+    for i, g in enumerate(grid_data):
+        grid_left_edges[i,:] = g.pop("left_edge")
+        grid_right_edges[i,:] = g.pop("right_edge")
+        grid_dimensions[i,:] = g.pop("dimensions")
+        grid_levels[i,:] = g.pop("level")
+        sfh[i] = g
+
+    handler = StreamHandler(
+        grid_left_edges,
+        grid_right_edges,
+        grid_dimensions,
+        grid_levels,
+        None, # parent_ids is none
+        number_of_particles*np.ones(ngrids, dtype='int64').reshape(ngrids,1),
+        np.zeros(ngrids).reshape((ngrids,1)),
+        sfh,
+    )
+
+    handler.name = "AMRGridData"
+    handler.domain_left_edge = domain_left_edge
+    handler.domain_right_edge = domain_right_edge
+    handler.refine_by = 2
+    handler.dimensionality = 3
+    handler.domain_dimensions = domain_dimensions
+    handler.simulation_time = sim_time
+    handler.cosmology_simulation = 0
+
+    spf = StreamStaticOutput(handler)
+    spf.units["cm"] = sim_unit_to_cm
+    spf.units['1'] = 1.0
+    spf.units["unitary"] = 1.0
+    box_in_mpc = sim_unit_to_cm / mpc_conversion['cm']
+    for unit in mpc_conversion.keys():
+        spf.units[unit] = mpc_conversion[unit] * box_in_mpc
+    return spf
+
+def refine_amr(base_pf, refinement_criteria, fluid_operators, max_level,
+               callback = None):
+    r"""Given a base parameter file, repeatedly apply refinement criteria and
+    fluid operators until a maximum level is reached.
+
+    Parameters
+    ----------
+    base_pf : StaticOutput
+        This is any static output.  It can also be a stream static output, for
+        instance as returned by load_uniform_grid.
+    refinement_criteria : list of :class:`~yt.utilities.flagging_methods.FlaggingMethod`
+        These criteria will be applied in sequence to identify cells that need
+        to be refined.
+    fluid_operators : list of :class:`~yt.utilities.initial_conditions.FluidOperator`
+        These fluid operators will be applied in sequence to all resulting
+        grids.
+    max_level : int
+        The maximum level to which the data will be refined
+    callback : function, optional
+        A function that will be called at the beginning of each refinement
+        cycle, with the current parameter file.
+
+    Examples
+    --------
+    >>> domain_dims = (32, 32, 32)
+    >>> data = np.zeros(domain_dims) + 0.25
+    >>> fo = [ic.CoredSphere(0.05, 0.3, [0.7,0.4,0.75], {"Density": (0.25, 100.0)})]
+    >>> rc = [fm.flagging_method_registry["overdensity"](8.0)]
+    >>> ug = load_uniform_grid({'Density': data}, domain_dims, 1.0)
+    >>> pf = refine_amr(ug, rc, fo, 5)
+    """
+    last_gc = base_pf.h.num_grids
+    cur_gc = -1
+    pf = base_pf    
+    while pf.h.max_level < max_level and last_gc != cur_gc:
+        mylog.info("Refining another level.  Current max level: %s",
+                  pf.h.max_level)
+        last_gc = pf.h.grids.size
+        for m in fluid_operators: m.apply(pf)
+        if callback is not None: callback(pf)
+        grid_data = []
+        for g in pf.h.grids:
+            gd = dict( left_edge = g.LeftEdge,
+                       right_edge = g.RightEdge,
+                       level = g.Level,
+                       dimensions = g.ActiveDimensions )
+            for field in pf.h.field_list:
+                gd[field] = g[field]
+            grid_data.append(gd)
+            if g.Level < pf.h.max_level: continue
+            fg = FlaggingGrid(g, refinement_criteria)
+            nsg = fg.find_subgrids()
+            for sg in nsg:
+                LE = sg.left_index * g.dds
+                dims = sg.dimensions * pf.refine_by
+                grid = pf.h.smoothed_covering_grid(g.Level + 1, LE, dims)
+                gd = dict(left_edge = LE, right_edge = grid.right_edge,
+                          level = g.Level + 1, dimensions = dims)
+                for field in pf.h.field_list:
+                    gd[field] = grid[field]
+                grid_data.append(gd)
+        pf = load_amr_grids(grid_data, pf.domain_dimensions, 1.0)
+        cur_gc = pf.h.num_grids
+    return pf
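
The refine_amr docstring example above assumes ic and fm are already
imported; a self-contained sketch of the same flow, using the module paths
introduced in this changeset:

    import numpy as np
    import yt.utilities.initial_conditions as ic
    import yt.utilities.flagging_methods as fm
    from yt.frontends.stream.api import load_uniform_grid, refine_amr

    domain_dims = (32, 32, 32)
    data = np.zeros(domain_dims) + 0.25
    fo = [ic.CoredSphere(0.05, 0.3, [0.7, 0.4, 0.75],
                         {"Density": (0.25, 100.0)})]
    rc = [fm.flagging_method_registry["overdensity"](8.0)]
    ug = load_uniform_grid({'Density': data}, domain_dims, 1.0)
    pf = refine_amr(ug, rc, fo, 5)   # refine until level 5 or no new grids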


diff -r 426c98880ea9f68c3043347e7b5d2865c8d26b4c -r b7d78d603839d1a51ac0ecb4f54c67552f05c9cc yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -23,6 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+import __builtin__
 import time, types, signal, inspect, traceback, sys, pdb, os
 import contextlib
 import warnings, struct, subprocess
@@ -310,7 +311,7 @@
     maxval = max(maxval, 1)
     from yt.config import ytcfg
     if ytcfg.getboolean("yt", "suppressStreamLogging") or \
-       ytcfg.getboolean("yt", "ipython_notebook") or \
+       "__IPYTHON__" in dir(__builtin__) or \
        ytcfg.getboolean("yt", "__withintesting"):
         return DummyProgressBar()
     elif ytcfg.getboolean("yt", "__withinreason"):
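
The replacement test relies on IPython injecting __IPYTHON__ into the
builtins at startup; a minimal sketch of the same detection (Python 2):

    import __builtin__

    def within_ipython():
        # IPython adds __IPYTHON__ to __builtin__ when it starts
        return "__IPYTHON__" in dir(__builtin__)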


diff -r 426c98880ea9f68c3043347e7b5d2865c8d26b4c -r b7d78d603839d1a51ac0ecb4f54c67552f05c9cc yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -58,7 +58,7 @@
 from yt.data_objects.api import \
     BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \
     data_object_registry, \
-    derived_field, add_field, FieldInfo, \
+    derived_field, add_field, add_grad, FieldInfo, \
     ValidateParameter, ValidateDataField, ValidateProperty, \
     ValidateSpatial, ValidateGridType, \
     TimeSeriesData, AnalysisTask, analysis_task, \


diff -r 426c98880ea9f68c3043347e7b5d2865c8d26b4c -r b7d78d603839d1a51ac0ecb4f54c67552f05c9cc yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -26,7 +26,8 @@
 from yt.funcs import *
 from numpy.testing import assert_array_equal, assert_almost_equal, \
     assert_approx_equal, assert_array_almost_equal, assert_equal, \
-    assert_array_less, assert_string_equal, assert_array_almost_equal_nulp
+    assert_array_less, assert_string_equal, assert_array_almost_equal_nulp,\
+    assert_allclose
 
 def assert_rel_equal(a1, a2, decimals):
     # We have nan checks in here because occasionally we have fields that get


diff -r 426c98880ea9f68c3043347e7b5d2865c8d26b4c -r b7d78d603839d1a51ac0ecb4f54c67552f05c9cc yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -34,6 +34,7 @@
 from yt.testing import *
 from yt.config import ytcfg
 from yt.mods import *
+from yt.data_objects.static_output import StaticOutput
 import cPickle
 
 from yt.utilities.logger import disable_stream_logging
@@ -75,6 +76,8 @@
         ytcfg["yt","__withintesting"] = "True"
         AnswerTestingTest.result_storage = \
             self.result_storage = defaultdict(dict)
+        if options.compare_name == "SKIP":
+            options.compare_name = None
         if options.compare_name is not None:
             # Now we grab from our S3 store
             if options.compare_name == "latest":
@@ -132,6 +135,8 @@
 
 def can_run_pf(pf_fn):
     path = ytcfg.get("yt", "test_data_dir")
+    if isinstance(pf_fn, StaticOutput):
+        return AnswerTestingTest.result_storage is not None
     with temp_cwd(path):
         try:
             load(pf_fn)
@@ -141,26 +146,44 @@
 
 def data_dir_load(pf_fn):
     path = ytcfg.get("yt", "test_data_dir")
+    if isinstance(pf_fn, StaticOutput): return pf_fn
     with temp_cwd(path):
         pf = load(pf_fn)
         pf.h
         return pf
 
+def sim_dir_load(sim_fn, path = None, sim_type = "Enzo",
+                 find_outputs=False):
+    if path is None and not os.path.exists(sim_fn):
+        raise IOError
+    if os.path.exists(sim_fn) or not path:
+        path = "."
+    with temp_cwd(path):
+        return simulation(sim_fn, sim_type,
+                          find_outputs=find_outputs)
+
 class AnswerTestingTest(object):
     reference_storage = None
+    prefix = ""
     def __init__(self, pf_fn):
         self.pf = data_dir_load(pf_fn)
 
     def __call__(self):
         nv = self.run()
         if self.reference_storage is not None:
-            dd = self.reference_storage.get(str(self.pf))
+            dd = self.reference_storage.get(self.storage_name)
             if dd is None: raise YTNoOldAnswer()
             ov = dd[self.description]
             self.compare(nv, ov)
         else:
             ov = None
-        self.result_storage[str(self.pf)][self.description] = nv
+        self.result_storage[self.storage_name][self.description] = nv
+
+    @property
+    def storage_name(self):
+        if self.prefix != "":
+            return "%s_%s" % (self.prefix, self.pf)
+        return str(self.pf)
 
     def compare(self, new_result, old_result):
         raise RuntimeError
@@ -312,6 +335,22 @@
         for k in new_result:
             assert_equal(new_result[k], old_result[k])
 
+class VerifySimulationSameTest(AnswerTestingTest):
+    _type_name = "VerifySimulationSame"
+    _attrs = ()
+
+    def __init__(self, simulation_obj):
+        self.pf = simulation_obj
+
+    def run(self):
+        result = [ds.current_time for ds in self.pf]
+        return result
+
+    def compare(self, new_result, old_result):
+        assert_equal(len(new_result), len(old_result))
+        for i in range(len(new_result)):
+            assert_equal(new_result[i], old_result[i])
+        
 class GridHierarchyTest(AnswerTestingTest):
     _type_name = "GridHierarchy"
     _attrs = ()
@@ -353,6 +392,25 @@
         for newc, oldc in zip(new_result["children"], old_result["children"]):
             assert(newc == oldc)
 
+def requires_outputlog(path = ".", prefix = ""):
+    def ffalse(func):
+        return lambda: None
+    def ftrue(func):
+        @wraps(func)
+        def fyielder(*args, **kwargs):
+            with temp_cwd(path):
+                for t in func(*args, **kwargs):
+                    if isinstance(t, AnswerTestingTest):
+                        t.prefix = prefix
+                    yield t
+        return fyielder
+    if os.path.exists("OutputLog"):
+        return ftrue
+    with temp_cwd(path):
+        if os.path.exists("OutputLog"):
+            return ftrue
+    return ffalse
+
 def requires_pf(pf_fn, big_data = False):
     def ffalse(func):
         return lambda: None
@@ -394,3 +452,60 @@
                     yield PixelizedProjectionValuesTest(
                         pf_fn, axis, field, weight_field,
                         ds)
+
+def standard_small_simulation(pf_fn, fields):
+    if not can_run_pf(pf_fn): return
+    dso = [None]
+    yield GridHierarchyTest(pf_fn)
+    yield ParentageRelationshipsTest(pf_fn)
+    for field in fields:
+        yield GridValuesTest(pf_fn, field)
+        for axis in [0, 1, 2]:
+            for ds in dso:
+                for weight_field in [None, "Density"]:
+                    yield ProjectionValuesTest(
+                        pf_fn, axis, field, weight_field,
+                        ds)
+                yield FieldValuesTest(
+                        pf_fn, field, ds)
+                    
+class ShockTubeTest(object):
+    def __init__(self, data_file, solution_file, fields, 
+                 left_edges, right_edges, rtol, atol):
+        self.solution_file = solution_file
+        self.data_file = data_file
+        self.fields = fields
+        self.left_edges = left_edges
+        self.right_edges = right_edges
+        self.rtol = rtol
+        self.atol = atol
+
+    def __call__(self):
+        # Read in the pf
+        pf = load(self.data_file)  
+        exact = self.get_analytical_solution() 
+
+        ad = pf.h.all_data()
+        position = ad['x']
+        for k in self.fields:
+            field = ad[k]
+            for xmin, xmax in zip(self.left_edges, self.right_edges):
+                mask = (position >= xmin)*(position <= xmax)
+                exact_field = np.interp(position[mask], exact['pos'], exact[k]) 
+                # yield test vs analytical solution 
+                yield assert_allclose, field[mask], exact_field, \
+                    self.rtol, self.atol
+
+    def get_analytical_solution(self):
+        # Reads in from file 
+        pos, dens, vel, pres, inte = \
+                np.loadtxt(self.solution_file, unpack=True)
+        exact = {}
+        exact['pos'] = pos
+        exact['Density'] = dens
+        exact['x-velocity'] = vel
+        exact['Pressure'] = pres
+        exact['ThermalEnergy'] = inte
+        return exact
+
+
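
A sketch of driving the new ShockTubeTest as a nose generator test (the
file names and tolerances are illustrative, not from the commit):

    def test_sod_shock_tube():
        test = ShockTubeTest("sod_0010",            # hypothetical data file
                             "sod_analytical.dat",  # pos dens vel pres inte
                             ["Density", "x-velocity", "Pressure"],
                             left_edges=[0.0], right_edges=[1.0],
                             rtol=1e-2, atol=1e-7)
        for t in test():
            yield t   # (assert_allclose, actual, exact, rtol, atol)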


diff -r 426c98880ea9f68c3043347e7b5d2865c8d26b4c -r b7d78d603839d1a51ac0ecb4f54c67552f05c9cc yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -172,3 +172,11 @@
 
     def __str__(self):
         return "Must have A>=B>=C"
+
+class EnzoTestOutputFileNonExistent(YTException):
+    def __init__(self, testname):
+        self.testname = testname
+
+    def __str__(self):
+        return "Enzo test output file (OutputLog) not generated for: " + \
+            "'%s'" % (self.testname) + ".\nTest did not complete."


diff -r 426c98880ea9f68c3043347e7b5d2865c8d26b4c -r b7d78d603839d1a51ac0ecb4f54c67552f05c9cc yt/utilities/flagging_methods.py
--- a/yt/utilities/flagging_methods.py
+++ b/yt/utilities/flagging_methods.py
@@ -24,15 +24,10 @@
 """
 
 import numpy as np # For modern purposes
+from yt.utilities.lib import grow_flagging_field
 
 flagging_method_registry = {}
 
-def flag_cells(grid, methods):
-    flagged = np.zeros(grid.ActiveDimensions, dtype="bool")
-    for method in methods:
-        flagged |= method(grid)
-    return flagged
-
 class FlaggingMethod(object):
     _skip_add = False
     class __metaclass__(type):
@@ -46,6 +41,144 @@
     def __init__(self, over_density):
         self.over_density = over_density
 
-    def __call__(self, pf, grid):
-        rho = grid["Density"] / (pf.refine_by**grid.Level)
+    def __call__(self, grid):
+        rho = grid["Density"] / (grid.pf.refine_by**grid.Level)
         return (rho > self.over_density)
+
+class FlaggingGrid(object):
+    def __init__(self, grid, methods):
+        self.grid = grid
+        flagged = np.zeros(grid.ActiveDimensions, dtype="bool")
+        for method in methods:
+            flagged |= method(self.grid)
+        self.flagged = grow_flagging_field(flagged)
+        self.subgrids = []
+        self.left_index = grid.get_global_startindex()
+        self.dimensions = grid.ActiveDimensions.copy()
+
+    def find_subgrids(self):
+        if not np.any(self.flagged): return []
+        psg = ProtoSubgrid(self.flagged, self.left_index, self.dimensions)
+        sgl = [psg]
+        index = 0
+        while index < len(sgl):
+            psg = sgl[index]
+            psg.shrink()
+            if psg.dimensions.prod() == 0:
+                sgl[index] = None
+                continue
+            while not psg.acceptable:
+                new_psgs = []
+                for i, dim in enumerate(np.argsort(psg.dimensions)[::-1]):
+                    new_psgs = psg.find_by_zero_signature(dim)
+                    if len(new_psgs) > 1:
+                        break
+                if len(new_psgs) <= 1:
+                    new_psgs = psg.find_by_second_derivative()
+                psg = new_psgs[0]
+                sgl[index] = psg 
+                sgl.extend(new_psgs[1:])
+                psg.shrink()
+            index += 1
+        return sgl
+
+
+# Much or most of this is directly translated from Enzo
+class ProtoSubgrid(object):
+
+    def __init__(self, flagged_base, left_index, dimensions, offset = (0,0,0)):
+        self.left_index = left_index.copy()
+        self.dimensions = dimensions.copy()
+        self.flagged = flagged_base[offset[0]:offset[0]+dimensions[0],
+                                    offset[1]:offset[1]+dimensions[1],
+                                    offset[2]:offset[2]+dimensions[2]]
+        self.compute_signatures()
+
+    def compute_signatures(self):
+        self.sigs = []
+        for dim in range(3):
+            d1 = (dim + 1) % 3
+            d2 = (dim == 0)
+            self.sigs.append(self.flagged.sum(axis=d1).sum(axis=d2))
+
+    @property
+    def acceptable(self):
+        return float(self.flagged.sum()) / self.flagged.size > 0.2
+
+    def shrink(self):
+        new_ind = []
+        for dim in range(3):
+            sig = self.sigs[dim]
+            new_start = 0
+            while sig[new_start] == 0:
+                new_start += 1
+            new_end = sig.size 
+            while sig[new_end - 1] == 0:
+                new_end -= 1
+            self.dimensions[dim] = new_end - new_start
+            self.left_index[dim] += new_start
+            new_ind.append((new_start, new_end))
+        self.flagged = self.flagged[new_ind[0][0]:new_ind[0][1],
+                                    new_ind[1][0]:new_ind[1][1],
+                                    new_ind[2][0]:new_ind[2][1]]
+        self.compute_signatures()
+
+    def find_by_zero_signature(self, dim):
+        sig = self.sigs[dim]
+        grid_ends = np.zeros((sig.size, 2))
+        ng = 0
+        i = 0
+        while i < sig.size:
+            if sig[i] != 0:
+                grid_ends[ng, 0] = i
+                while i < sig.size and sig[i] != 0:
+                    i += 1
+                grid_ends[ng, 1] = i - 1
+                ng += 1
+            i += 1
+        new_grids = []
+        for si, ei in grid_ends[:ng,:]:
+            li = self.left_index.copy()
+            dims = self.dimensions.copy()
+            li[dim] += si
+            dims[dim] = ei - si
+            offset = [0,0,0]
+            offset[dim] = si
+            new_grids.append(ProtoSubgrid(self.flagged, li, dims, offset))
+        return new_grids
+
+    def find_by_second_derivative(self):
+        max_strength = 0
+        max_axis = -1
+        max_ind = -1
+        for dim in range(3):
+            sig = self.sigs[dim]
+            sd = sig[:-2] - 2.0*sig[1:-1] + sig[2:]
+            grid_ends = np.zeros((sig.size, 2))
+            ng = 0
+            center = int((self.flagged.shape[dim] - 1) / 2)
+            strength = zero_strength = 0
+            for i in range(1, sig.size-2):
+                # Note that sd is offset by one
+                if sd[i-1] * sd[i] < 0:
+                    strength = np.abs(sd[i-1] - sd[i])
+                    if strength > zero_strength or \
+                       (strength == zero_strength and np.abs(center - i) < np.abs(zero_cross -i )):
+                        zero_strength = strength
+                        zero_cross = i
+            if zero_strength > max_strength:
+                max_axis = dim
+                max_ind = zero_cross
+        dims = self.dimensions.copy()
+        li = self.left_index.copy()
+        dims[max_axis] = zero_cross
+        psg1 = ProtoSubgrid(self.flagged, li, dims)
+        li[max_axis] += zero_cross
+        dims[max_axis] = self.dimensions[max_axis] - zero_cross
+        offset = np.zeros(3)
+        offset[max_axis] = zero_cross
+        psg2 = ProtoSubgrid(self.flagged, li, dims, offset)
+        return [psg1, psg2]
+
+    def __str__(self):
+        return "LI: (%s) DIMS: (%s)" % (self.left_index, self.dimensions)
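
A minimal sketch of the flagging-to-subgrids flow added here, assuming a
loaded parameter file pf (the overdensity threshold is illustrative):

    from yt.utilities.flagging_methods import \
        flagging_method_registry, FlaggingGrid

    methods = [flagging_method_registry["overdensity"](8.0)]
    fg = FlaggingGrid(pf.h.grids[0], methods)  # flag cells, grow by one zone
    for sg in fg.find_subgrids():
        if sg is None: continue    # emptied proto-subgrids are left as None
        print sg                   # "LI: (...) DIMS: (...)"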


diff -r 426c98880ea9f68c3043347e7b5d2865c8d26b4c -r b7d78d603839d1a51ac0ecb4f54c67552f05c9cc yt/utilities/initial_conditions.py
--- /dev/null
+++ b/yt/utilities/initial_conditions.py
@@ -0,0 +1,80 @@
+"""
+Painting zones in a grid
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+
+class FluidOperator(object):
+    def apply(self, pf):
+        for g in pf.h.grids: self(g)
+
+class TopHatSphere(FluidOperator):
+    def __init__(self, radius, center, fields):
+        self.radius = radius
+        self.center = center
+        self.fields = fields
+        
+    def __call__(self, grid, sub_select = None):
+        r = np.zeros(grid.ActiveDimensions, dtype="float64")
+        for i, ax in enumerate("xyz"):
+            np.add(r, (grid[ax] - self.center[i])**2.0, r)
+        np.sqrt(r, r)
+        ind = (r <= self.radius)
+        if sub_select is not None:
+            ind &= sub_select
+        for field, val in self.fields.iteritems():
+            grid[field][ind] = val
+
+class CoredSphere(FluidOperator):
+    def __init__(self, core_radius, radius, center, fields):
+        self.radius = radius
+        self.center = center
+        self.fields = fields
+        self.core_radius = core_radius
+
+    def __call__(self, grid, sub_select = None):
+        r = np.zeros(grid.ActiveDimensions, dtype="float64")
+        r2 = self.radius**2
+        cr2 = self.core_radius**2
+        for i, ax in enumerate("xyz"):
+            np.add(r, (grid[ax] - self.center[i])**2.0, r)
+        np.maximum(r, cr2, r)
+        ind = (r <= r2)
+        if sub_select is not None:
+            ind &= sub_select
+        for field, (outer_val, inner_val) in self.fields.iteritems():
+            val = ((r[ind] - cr2) / (r2 - cr2))**0.5 * (outer_val - inner_val)
+            grid[field][ind] = val + inner_val
+
+class RandomFluctuation(FluidOperator):
+    def __init__(self, fields):
+        self.fields = fields
+
+    def __call__(self, grid, sub_select = None):
+        if sub_select is None:
+            sub_select = Ellipsis
+        for field, mag in self.fields.iteritems():
+            vals = grid[field][sub_select]
+            rc = 1.0 + (np.random.random(vals.shape) - 0.5) * mag
+            grid[field][sub_select] *= rc
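
A sketch of applying the new fluid operators to an in-memory dataset (the
grid size and field values are illustrative):

    import numpy as np
    from yt.frontends.stream.api import load_uniform_grid
    import yt.utilities.initial_conditions as ic

    pf = load_uniform_grid({"Density": np.ones((64, 64, 64))},
                           [64, 64, 64], 1.0)
    sphere = ic.TopHatSphere(0.1, [0.5, 0.5, 0.5], {"Density": 10.0})
    sphere.apply(pf)   # FluidOperator.apply paints every grid in place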


diff -r 426c98880ea9f68c3043347e7b5d2865c8d26b4c -r b7d78d603839d1a51ac0ecb4f54c67552f05c9cc yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -291,3 +291,25 @@
     # Return out unique values
     return best_dim, split, less_ids.view("bool"), greater_ids.view("bool")
 
+
+def grow_flagging_field(oofield):
+    cdef np.ndarray[np.uint8_t, ndim=3] ofield = oofield.astype("uint8")
+    cdef np.ndarray[np.uint8_t, ndim=3] nfield
+    nfield = np.zeros_like(ofield)
+    cdef int i, j, k, ni, nj, nk
+    cdef int oi, oj, ok
+    for ni in range(ofield.shape[0]):
+        for nj in range(ofield.shape[1]):
+            for nk in range(ofield.shape[2]):
+                for oi in range(3):
+                    i = ni + (oi - 1)
+                    if i < 0 or i >= ofield.shape[0]: continue
+                    for oj in range(3):
+                        j = nj + (oj - 1)
+                        if j < 0 or j >= ofield.shape[1]: continue
+                        for ok in range(3):
+                            k = nk + (ok - 1)
+                            if k < 0 or k >= ofield.shape[2]: continue
+                            if ofield[i, j, k] == 1:
+                                nfield[ni, nj, nk] = 1
+    return nfield.astype("bool")
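
The Cython loop above is a 3x3x3 boolean dilation; an equivalent (slower)
pure-NumPy sketch, for reference only:

    import numpy as np

    def grow_flagging_field_py(ofield):
        # Flag a cell if any cell in its 3x3x3 neighborhood is flagged;
        # neighbors outside the domain are ignored (non-periodic).
        o = np.asarray(ofield, dtype="bool")
        n = np.zeros_like(o)
        for d0 in (-1, 0, 1):
            for d1 in (-1, 0, 1):
                for d2 in (-1, 0, 1):
                    src, dst = [], []
                    for d, s in zip((d0, d1, d2), o.shape):
                        dst.append(slice(max(0, -d), min(s, s - d)))
                        src.append(slice(max(0, d), min(s, s + d)))
                    n[tuple(dst)] |= o[tuple(src)]
        return n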


diff -r 426c98880ea9f68c3043347e7b5d2865c8d26b4c -r b7d78d603839d1a51ac0ecb4f54c67552f05c9cc yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -581,7 +581,9 @@
                     ncols, size = data.shape
             ncols = self.comm.allreduce(ncols, op=MPI.MAX)
             if ncols == 0:
-                    data = np.zeros(0, dtype=dtype) # This only works for
+                data = np.zeros(0, dtype=dtype) # This only works for
+            elif data is None:
+                data = np.zeros((ncols, 0), dtype=dtype)
             size = data.shape[-1]
             sizes = np.zeros(self.comm.size, dtype='int64')
             outsize = np.array(size, dtype='int64')
@@ -1058,3 +1060,49 @@
                 nextdim = (nextdim + 1) % 3
         return cuts
     
+class GroupOwnership(ParallelAnalysisInterface):
+    def __init__(self, items):
+        ParallelAnalysisInterface.__init__(self)
+        self.num_items = len(items)
+        self.items = items
+        assert(self.num_items >= self.comm.size)
+        self.owned = range(self.comm.size)
+        self.pointer = 0
+        if parallel_capable:
+            communication_system.push_with_ids([self.comm.rank])
+
+    def __del__(self):
+        if parallel_capable:
+            communication_system.pop()
+
+    def inc(self, n = -1):
+        old_item = self.item
+        if n == -1: n = self.comm.size
+        for i in range(n):
+            if self.pointer >= self.num_items - self.comm.size: break
+            self.owned[self.pointer % self.comm.size] += self.comm.size
+            self.pointer += 1
+        if self.item is not old_item:
+            self.switch()
+            
+    def dec(self, n = -1):
+        old_item = self.item
+        if n == -1: n = self.comm.size
+        for i in range(n):
+            if self.pointer == 0: break
+            self.owned[(self.pointer - 1) % self.comm.size] -= self.comm.size
+            self.pointer -= 1
+        if self.item is not old_item:
+            self.switch()
+
+    _last = None
+    @property
+    def item(self):
+        own = self.owned[self.comm.rank]
+        if self._last != own:
+            self._item = self.items[own]
+            self._last = own
+        return self._item
+
+    def switch(self):
+        pass


diff -r 426c98880ea9f68c3043347e7b5d2865c8d26b4c -r b7d78d603839d1a51ac0ecb4f54c67552f05c9cc yt/utilities/tests/test_flagging_methods.py
--- a/yt/utilities/tests/test_flagging_methods.py
+++ b/yt/utilities/tests/test_flagging_methods.py
@@ -9,4 +9,4 @@
 def test_over_density():
     od_flag = flagging_method_registry["overdensity"](0.75) 
     criterion = (pf.h.grids[0]["Density"] > 0.75)
-    assert( np.all( od_flag(pf, pf.h.grids[0]) == criterion) )
+    assert( np.all( od_flag(pf.h.grids[0]) == criterion) )


diff -r 426c98880ea9f68c3043347e7b5d2865c8d26b4c -r b7d78d603839d1a51ac0ecb4f54c67552f05c9cc yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -407,8 +407,9 @@
                                self.data_source[item],
                                self.buff_size[0], self.buff_size[1],
                                self.bounds).transpose()
-        self[item] = buff
-        return buff
+        ia = ImageArray(buff, info=self._get_info(item))
+        self[item] = ia
+        return ia 
 
 
 class OffAxisProjectionFixedResolutionBuffer(FixedResolutionBuffer):
@@ -430,7 +431,8 @@
                                    weight=ds.weight_field, volume=ds.volume,
                                    no_ghost=ds.no_ghost, interpolated=ds.interpolated,
                                    north_vector=ds.north_vector)
-        self[item] = buff.swapaxes(0,1)
-        return buff
+        ia = ImageArray(buff.swapaxes(0,1), info=self._get_info(item))
+        self[item] = ia
+        return ia 
 
 


diff -r 426c98880ea9f68c3043347e7b5d2865c8d26b4c -r b7d78d603839d1a51ac0ecb4f54c67552f05c9cc yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -633,6 +633,9 @@
         y0, y1 = plot.ylim
         xx0, xx1 = plot._axes.get_xlim()
         yy0, yy1 = plot._axes.get_ylim()
+
+        extent = [xx0,xx1,yy0,yy1]
+
         plot._axes.hold(True)
 
         px_index = x_dict[plot.data.axis]
@@ -662,7 +665,7 @@
                              (x0, x1, y0, y1), 0).transpose()
             buff = np.maximum(temp, buff)
         self.rv = plot._axes.contour(buff, len(self.clumps)+1,
-                                     **self.plot_args)
+                                     extent=extent,**self.plot_args)
         plot._axes.hold(False)
 
 class ArrowCallback(PlotCallback):
@@ -1170,41 +1173,59 @@
           'kev': 1e-12 * 7.6e-8 / 6.03,
           'mev': 1e-15 * 7.6e-8 / 6.03,
           }
+    _bbox_dict = {'boxstyle': 'square,pad=0.6', 'fc': 'white', 'ec': 'black', 'alpha': 1.0}
 
-    def __init__(self, x, y, units=None, format="{time:.3G} {units}", **kwargs):
+    def __init__(self, x, y, units=None, format="{time:.3G} {units}", normalized=False, 
+                 bbox_dict=None, **kwargs):
         """ 
-        annotate_timestamp(x, y, units=None, format="{time:.3G} {units}", **kwargs)
+        annotate_timestamp(x, y, units=None, format="{time:.3G} {units}",
+                           normalized=False, bbox_dict=None, **kwargs)
 
         Adds the current time to the plot at point given by *x* and *y*.  If *units* 
         is given ('s', 'ms', 'ns', etc), it will convert the time to this basis.  If 
         *units* is None, it will attempt to figure out the correct value by which to 
         scale.  The *format* keyword is a template string that will be evaluated and 
-        displayed on the plot.  All other *kwargs* will be passed to the text() 
-        method on the plot axes.  See matplotlib's text() functions for more 
-        information.
+        displayed on the plot.  If *normalized* is true, *x* and *y* are interpreted 
+        as normalized plot coordinates (0,0 is lower-left and 1,1 is upper-right) 
+        otherwise *x* and *y* are assumed to be in plot coordinates. The *bbox_dict* 
+        is an optional dict of arguments for the bbox that frames the timestamp, see 
+        matplotlib's text annotation guide for more details. All other *kwargs* will 
+        be passed to the text() method on the plot axes.  See matplotlib's text() 
+        functions for more information.
         """
         self.x = x
         self.y = y
         self.format = format
         self.units = units
+        self.normalized = normalized
+        if bbox_dict is not None:
+            self.bbox_dict = bbox_dict
+        else:
+            self.bbox_dict = self._bbox_dict
         self.kwargs = {'color': 'w'}
         self.kwargs.update(kwargs)
 
     def __call__(self, plot):
         if self.units is None:
-            t = plot.data.pf.current_time
+            t = plot.data.pf.current_time * plot.data.pf['Time']
             scale_keys = ['as', 'fs', 'ps', 'ns', 'us', 'ms', 's']
             self.units = 's'
             for k in scale_keys:
                 if t < self._time_conv[k]:
                     break
                 self.units = k
-        t = plot.data.pf.current_time / self._time_conv[self.units.lower()]
+        t = plot.data.pf.current_time * plot.data.pf['Time'] 
+        t /= self._time_conv[self.units.lower()]
         if self.units == 'us':
             self.units = '$\\mu s$'
         s = self.format.format(time=t, units=self.units)
         plot._axes.hold(True)
-        plot._axes.text(self.x, self.y, s, **self.kwargs)
+        if self.normalized:
+            plot._axes.text(self.x, self.y, s, horizontalalignment='center',
+                            verticalalignment='center', 
+                            transform = plot._axes.transAxes, bbox=self.bbox_dict)
+        else:
+            plot._axes.text(self.x, self.y, s, bbox=self.bbox_dict, **self.kwargs)
         plot._axes.hold(False)
 
 
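A sketch of the extended timestamp annotation, assuming a loaded pf (the
position, units, and bbox values are illustrative):

    p = SlicePlot(pf, "x", "Density")
    p.annotate_timestamp(0.05, 0.95, units="s", normalized=True,
                         bbox_dict={"boxstyle": "round", "fc": "white",
                                    "alpha": 0.8})
    p.save()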


diff -r 426c98880ea9f68c3043347e7b5d2865c8d26b4c -r b7d78d603839d1a51ac0ecb4f54c67552f05c9cc yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -60,6 +60,10 @@
     axis_labels
 from yt.utilities.math_utils import \
     ortho_find
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+    GroupOwnership
+from yt.data_objects.time_series import \
+    TimeSeriesData
 
 def invalidate_data(f):
     @wraps(f)
@@ -209,7 +213,7 @@
     _vector_info = None
     _frb = None
     def __init__(self, data_source, bounds, buff_size=(800,800), antialias=True, 
-                 periodic=True, origin='center-window', oblique=False):
+                 periodic=True, origin='center-window', oblique=False, fontsize=15):
         r"""
         PlotWindow(data_source, bounds, buff_size=(800,800), antialias = True)
         
@@ -239,6 +243,10 @@
             rendering is used during data deposition.
 
         """
+        if not hasattr(self, "pf"):
+            self.pf = data_source.pf
+            ts = self._initialize_dataset(self.pf) 
+            self.ts = ts
         self._initfinished = False
         self.center = None
         self.plots = {}
@@ -249,11 +257,40 @@
         self.antialias = True
         self.set_window(bounds) # this automatically updates the data and plot
         self.origin = origin
+        self.fontsize = fontsize
         if self.data_source.center is not None and oblique == False:
             center = [self.data_source.center[i] for i in range(len(self.data_source.center)) if i != self.data_source.axis]
             self.set_center(center)
         self._initfinished = True
 
+    def _initialize_dataset(self, ts):
+        if not isinstance(ts, TimeSeriesData):
+            if not iterable(ts): ts = [ts]
+            ts = TimeSeriesData(ts)
+        return ts
+
+    def __iter__(self):
+        for pf in self.ts:
+            mylog.warning("Switching to %s", pf)
+            self._switch_pf(pf)
+            yield self
+
+    def piter(self, *args, **kwargs):
+        for pf in self.ts.piter(*args, **kwargs):
+            self._switch_pf(pf)
+            yield self
+
+    def _switch_pf(self, new_pf):
+        ds = self.data_source
+        name = ds._type_name
+        kwargs = dict((n, getattr(ds, n)) for n in ds._con_args)
+        new_ds = getattr(new_pf.h, name)(**kwargs)
+        self.pf = new_pf
+        self.data_source = new_ds
+        self._data_valid = self._plot_valid = False
+        self._recreate_frb()
+        self._setup_plots()
+
     def __getitem__(self, item):
         return self.plots[item]
 
@@ -273,7 +310,6 @@
             self._frb._get_data_source_fields()
         else:
             for key in old_fields: self._frb[key]
-        self.pf = self._frb.pf
         self._data_valid = True
         
     def _setup_plots(self):
@@ -767,8 +803,10 @@
                 labels = [r'$\rm{Image\/x'+axes_unit_label+'}$',
                           r'$\rm{Image\/y'+axes_unit_label+'}$']
 
-            self.plots[f].axes.set_xlabel(labels[0])
-            self.plots[f].axes.set_ylabel(labels[1])
+            self.plots[f].axes.set_xlabel(labels[0],fontsize=self.fontsize)
+            self.plots[f].axes.set_ylabel(labels[1],fontsize=self.fontsize)
+
+            self.plots[f].axes.tick_params(labelsize=self.fontsize)
 
             field_name = self.data_source.pf.field_info[f].display_name
 
@@ -792,7 +830,9 @@
                     raise YTCannotParseUnitDisplayName(f, md['units'],str(err))
                 label = field_name+r'$\/\/('+md['units']+r')$'
 
-            self.plots[f].cb.set_label(label)
+            self.plots[f].cb.set_label(label,fontsize=self.fontsize)
+
+            self.plots[f].cb.ax.tick_params(labelsize=self.fontsize)
 
             self.run_callbacks(f)
 
@@ -919,7 +959,7 @@
     _frb_generator = FixedResolutionBuffer
 
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
-                 origin='center-window'):
+                 origin='center-window', fontsize=15):
         r"""Creates a slice plot from a parameter file
         
         Given a pf object, an axis to slice along, and a field name
@@ -975,6 +1015,8 @@
              to the bottom-left hand corner of the simulation domain, 'center-domain',
              corresponding the center of the simulation domain, or 'center-window' for 
              the center of the plot window.
+        fontsize : integer
+             The size of the fonts for the axis, colorbar, and tick labels.
              
         Examples
         --------
@@ -986,8 +1028,12 @@
         >>> p.save('sliceplot')
         
         """
+        # This will handle time series data and controllers
+        ts = self._initialize_dataset(pf) 
+        self.ts = ts
+        pf = self.pf = ts[0]
         axis = fix_axis(axis)
-        (bounds,center) = GetBoundsAndCenter(axis, center, width, pf)
+        (bounds, center) = GetBoundsAndCenter(axis, center, width, pf)
         slc = pf.h.slice(axis, center[axis], fields=fields)
         PWViewerMPL.__init__(self, slc, bounds, origin=origin)
         self.set_axes_unit(axes_unit)
@@ -997,7 +1043,7 @@
     _frb_generator = FixedResolutionBuffer
 
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
-                 weight_field=None, max_level=None, origin='center-window'):
+                 weight_field=None, max_level=None, origin='center-window', fontsize=15):
         r"""Creates a projection plot from a parameter file
         
         Given a pf object, an axis to project along, and a field name
@@ -1057,6 +1103,8 @@
             The name of the weighting field.  Set to None for no weight.
         max_level: int
             The maximum level to project to.
+        fontsize : integer
+             The size of the fonts for the axis, colorbar, and tick labels.
         
         Examples
         --------
@@ -1068,8 +1116,11 @@
         >>> p.save('sliceplot')
         
         """
+        ts = self._initialize_dataset(pf) 
+        self.ts = ts
+        pf = self.pf = ts[0]
         axis = fix_axis(axis)
-        (bounds,center) = GetBoundsAndCenter(axis,center,width,pf)
+        (bounds, center) = GetBoundsAndCenter(axis, center, width, pf)
         proj = pf.h.proj(axis,fields,weight_field=weight_field,max_level=max_level,center=center)
         PWViewerMPL.__init__(self,proj,bounds,origin=origin)
         self.set_axes_unit(axes_unit)
@@ -1079,7 +1130,7 @@
     _frb_generator = ObliqueFixedResolutionBuffer
 
     def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
-                 axes_unit=None, north_vector=None):
+                 axes_unit=None, north_vector=None, fontsize=15):
         r"""Creates an off axis slice plot from a parameter file
 
         Given a pf object, a normal vector defining a slicing plane, and
@@ -1116,7 +1167,8 @@
             A vector defining the 'up' direction in the plot.  This
             option sets the orientation of the slicing plane.  If not
             set, an arbitrary grid-aligned north-vector is chosen.
-
+        fontsize : integer
+             The size of the fonts for the axis, colorbar, and tick labels.
         """
         (bounds,center_rot) = GetOffAxisBoundsAndCenter(normal,center,width,pf)
         cutting = pf.h.cutting(normal,center,fields=fields,north_vector=north_vector)
@@ -1155,7 +1207,7 @@
     def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
                  depth=(1,'unitary'), axes_unit=None, weight_field=None, 
                  max_level=None, north_vector=None, volume=None, no_ghost=False, 
-                 le=None, re=None, interpolated=False):
+                 le=None, re=None, interpolated=False, fontsize=15):
         r"""Creates an off axis projection plot from a parameter file
 
         Given a pf object, a normal vector to project along, and
@@ -1491,4 +1543,3 @@
                                       norm = norm, vmin = self.zmin, 
                                       vmax = self.zmax, cmap = cmap)
         self.image.axes.ticklabel_format(scilimits=(-4,3))
-
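
With this changeset a plot window can be built from a time series and
accepts a fontsize for its labels; a sketch (the output glob is
hypothetical):

    from yt.mods import *
    ts = TimeSeriesData.from_filenames("DD????/DD????")
    p = SlicePlot(ts, "z", "Density", fontsize=18)
    for plot in p.piter():
        plot.save()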



https://bitbucket.org/yt_analysis/yt/changeset/bf4068e9ce0d/
changeset:   bf4068e9ce0d
branch:      yt
user:        sskory
date:        2012-11-09 22:09:27
summary:     Making sure the workgroups are deleted correctly.
affected #:  1 file

diff -r b7d78d603839d1a51ac0ecb4f54c67552f05c9cc -r bf4068e9ce0d8b85a1d813eff9329428425907ee yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -279,12 +279,12 @@
     ranks = None
     available_ranks = None
     tasks = None
-    workgroups = []
     def __init__(self):
         self.comm = communication_system.communicators[-1]
         self.size = self.comm.size
         self.ranks = range(self.size)
         self.available_ranks = range(self.size)
+        self.workgroups = []
     
     def add_workgroup(self, size=None, ranks=None, name=None):
         if size is None:
@@ -296,7 +296,7 @@
             ranks = [self.available_ranks.pop(0) for i in range(size)]
         # Default name to the workgroup number.
         if name is None: 
-            name = string(len(workgroups))
+            name = str(len(self.workgroups))
         group = self.comm.comm.Get_group().Incl(ranks)
         new_comm = self.comm.comm.Create(group)
         if self.comm.rank in ranks:
@@ -304,16 +304,20 @@
         self.workgroups.append(Workgroup(len(ranks), ranks, new_comm, name))
     
     def free_workgroup(self, workgroup):
+        # If you want to actually delete the workgroup you will need to
+        # pop it out of the self.workgroups list so you don't have references
+        # that are left dangling, e.g. see free_all() below.
         for i in workgroup.ranks:
             if self.comm.rank == i:
                 communication_system.communicators.pop()
             self.available_ranks.append(i) 
-        del workgroup
         self.available_ranks.sort()
 
     def free_all(self):
         for wg in self.workgroups:
             self.free_workgroup(wg)
+        for i in range(len(self.workgroups)):
+            self.workgroups.pop(0)
 
     @classmethod
     def from_sizes(cls, sizes):
@@ -581,9 +585,7 @@
                     ncols, size = data.shape
             ncols = self.comm.allreduce(ncols, op=MPI.MAX)
             if ncols == 0:
-                data = np.zeros(0, dtype=dtype) # This only works for
-            elif data is None:
-                data = np.zeros((ncols, 0), dtype=dtype)
+                    data = np.zeros(0, dtype=dtype) # This only works for
             size = data.shape[-1]
             sizes = np.zeros(self.comm.size, dtype='int64')
             outsize = np.array(size, dtype='int64')
@@ -1060,49 +1062,3 @@
                 nextdim = (nextdim + 1) % 3
         return cuts
     
-class GroupOwnership(ParallelAnalysisInterface):
-    def __init__(self, items):
-        ParallelAnalysisInterface.__init__(self)
-        self.num_items = len(items)
-        self.items = items
-        assert(self.num_items >= self.comm.size)
-        self.owned = range(self.comm.size)
-        self.pointer = 0
-        if parallel_capable:
-            communication_system.push_with_ids([self.comm.rank])
-
-    def __del__(self):
-        if parallel_capable:
-            communication_system.pop()
-
-    def inc(self, n = -1):
-        old_item = self.item
-        if n == -1: n = self.comm.size
-        for i in range(n):
-            if self.pointer >= self.num_items - self.comm.size: break
-            self.owned[self.pointer % self.comm.size] += self.comm.size
-            self.pointer += 1
-        if self.item is not old_item:
-            self.switch()
-            
-    def dec(self, n = -1):
-        old_item = self.item
-        if n == -1: n = self.comm.size
-        for i in range(n):
-            if self.pointer == 0: break
-            self.owned[(self.pointer - 1) % self.comm.size] -= self.comm.size
-            self.pointer -= 1
-        if self.item is not old_item:
-            self.switch()
-
-    _last = None
-    @property
-    def item(self):
-        own = self.owned[self.comm.rank]
-        if self._last != own:
-            self._item = self.items[own]
-            self._last = own
-        return self._item
-
-    def switch(self):
-        pass
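
A sketch of the corrected workgroup lifecycle (the enclosing pool class is
not named in this hunk; yt's ProcessorPool is assumed):

    pool = ProcessorPool()
    pool.add_workgroup(size=2, name="io")
    pool.add_workgroup(size=2, name="compute")
    # ... parallel work against pool.workgroups ...
    pool.free_all()   # frees the ranks and now also empties pool.workgroups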



https://bitbucket.org/yt_analysis/yt/changeset/fb1152e1af1b/
changeset:   fb1152e1af1b
branch:      yt
user:        sskory
date:        2012-11-09 22:11:18
summary:     Removed tag hop callback
affected #:  1 file

diff -r bf4068e9ce0d8b85a1d813eff9329428425907ee -r fb1152e1af1b2cecb2b31abb92e173e8fa413df3 .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5153,3 +5153,5 @@
 fff7118f00e25731ccf37cba3082b8fcb73cf90e svn.371
 0000000000000000000000000000000000000000 svn.371
 f15825659f5af3ce64aaad30062aff3603cbfb66 hop callback
+0000000000000000000000000000000000000000 hop callback
+0000000000000000000000000000000000000000 hop callback



https://bitbucket.org/yt_analysis/yt/changeset/bcf34c16c801/
changeset:   bcf34c16c801
branch:      yt
user:        sskory
date:        2012-11-09 22:17:45
summary:     Somehow my merge removed the GroupOwnership class.
affected #:  1 file

diff -r fb1152e1af1b2cecb2b31abb92e173e8fa413df3 -r bcf34c16c801cb3d62496984ec34e2a7e89e77c7 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -1062,3 +1062,49 @@
                 nextdim = (nextdim + 1) % 3
         return cuts
     
+class GroupOwnership(ParallelAnalysisInterface):
+    def __init__(self, items):
+        ParallelAnalysisInterface.__init__(self)
+        self.num_items = len(items)
+        self.items = items
+        assert(self.num_items >= self.comm.size)
+        self.owned = range(self.comm.size)
+        self.pointer = 0
+        if parallel_capable:
+            communication_system.push_with_ids([self.comm.rank])
+
+    def __del__(self):
+        if parallel_capable:
+            communication_system.pop()
+
+    def inc(self, n = -1):
+        old_item = self.item
+        if n == -1: n = self.comm.size
+        for i in range(n):
+            if self.pointer >= self.num_items - self.comm.size: break
+            self.owned[self.pointer % self.comm.size] += self.comm.size
+            self.pointer += 1
+        if self.item is not old_item:
+            self.switch()
+            
+    def dec(self, n = -1):
+        old_item = self.item
+        if n == -1: n = self.comm.size
+        for i in range(n):
+            if self.pointer == 0: break
+            self.owned[(self.pointer - 1) % self.comm.size] -= self.comm.size
+            self.pointer -= 1
+        if self.item is not old_item:
+            self.switch()
+
+    _last = None
+    @property
+    def item(self):
+        own = self.owned[self.comm.rank]
+        if self._last != own:
+            self._item = self.items[own]
+            self._last = own
+        return self._item
+
+    def switch(self):
+        pass



https://bitbucket.org/yt_analysis/yt/changeset/5f6163709ed8/
changeset:   5f6163709ed8
branch:      yt
user:        sskory
date:        2012-11-09 22:19:27
summary:     Something else the merge deleted from parallel_analysis_interface.
affected #:  1 file

diff -r bcf34c16c801cb3d62496984ec34e2a7e89e77c7 -r 5f6163709ed86827de0b2db599113dba8f1b1e96 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -585,7 +585,9 @@
                     ncols, size = data.shape
             ncols = self.comm.allreduce(ncols, op=MPI.MAX)
             if ncols == 0:
-                    data = np.zeros(0, dtype=dtype) # This only works for
+                data = np.zeros(0, dtype=dtype) # This only works for
+            elif data is None:
+                data = np.zeros((ncols, 0), dtype=dtype)
             size = data.shape[-1]
             sizes = np.zeros(self.comm.size, dtype='int64')
             outsize = np.array(size, dtype='int64')

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


