[yt-svn] commit/yt-3.0: 160 new changesets

Bitbucket commits-noreply@bitbucket.org
Mon Nov 19 05:25:02 PST 2012


160 new commits in yt-3.0:


https://bitbucket.org/yt_analysis/yt-3.0/changeset/3e24db731586/
changeset:   3e24db731586
branch:      yt-3.0
user:        MatthewTurk
date:        2012-11-10 05:25:57
summary:     Getting closer to full active particle analysis for Enzo 3.0 datasets.
affected #:  5 files

diff -r 97597f12e16394250ba004f098f1a2d4196ee991 -r 3e24db731586159b73dc2fbc931b33958f42aca2 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -230,7 +230,7 @@
             gen_obj = self._current_chunk.objs[0]
         try:
             self.pf.field_info[fname].check_available(gen_obj)
-        except NeedsGridType, ngt_exception:
+        except NeedsGridType as ngt_exception:
             rv = np.empty(self.size, dtype="float64")
             ind = 0
             ngz = ngt_exception.ghost_zones
@@ -259,7 +259,7 @@
         try:
             finfo = self._get_field_info(*field)
             finfo.check_available(gen_obj)
-        except NeedsGridType, ngt_exception:
+        except NeedsGridType as ngt_exception:
             if ngt_exception.ghost_zones != 0:
                 raise NotImplementedError
             size = self._count_particles(ftype)
@@ -269,6 +269,7 @@
                 for i,chunk in enumerate(self.chunks(field, "spatial")):
                     x, y, z = (self[ftype, 'particle_position_%s' % ax]
                                for ax in 'xyz')
+                    if x.size == 0: continue
                     mask = self._current_chunk.objs[0].select_particles(
                         self.selector, x, y, z)
                     if mask is None: continue
@@ -289,6 +290,7 @@
             for i,chunk in enumerate(self.chunks([], "spatial")):
                 x, y, z = (self[ftype, 'particle_position_%s' % ax]
                             for ax in 'xyz')
+                if x.size == 0: continue
                 size += self._current_chunk.objs[0].count_particles(
                     self.selector, x, y, z)
         return size
@@ -351,7 +353,7 @@
         if (ftype, fname) in self.pf.field_info:
             return self.pf.field_info[(ftype, fname)]
         if fname in self.pf.field_info:
-           return self.pf.field_info[fname]
+            return self.pf.field_info[fname]
         raise YTFieldNotFound((fname, ftype), self.pf)
 
     def _determine_fields(self, fields):

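The two hunks above replace the Python 2-only "except E, e" spelling with "except E as e", which parses under both Python 2.6+ and Python 3. A minimal sketch of the portable form:

    # Sketch: "except KeyError, exc" parses only under Python 2;
    # "except KeyError as exc" works under Python 2.6+ and Python 3.
    try:
        {}["missing"]
    except KeyError as exc:
        print("caught %s" % exc)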

diff -r 97597f12e16394250ba004f098f1a2d4196ee991 -r 3e24db731586159b73dc2fbc931b33958f42aca2 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -292,6 +292,23 @@
         self.display_name = display_name
         self.not_in_all = not_in_all
 
+    def _copy_def(self):
+        dd = {}
+        dd['name'] = self.name
+        dd['convert_function'] = self._convert_function
+        dd['particle_convert_function'] = self._particle_convert_function
+        dd['units'] = self._units
+        dd['projected_units'] = self._projected_units,
+        dd['take_log'] = self.take_log
+        dd['validators'] = self.validators.copy()
+        dd['particle_type'] = self.particle_type
+        dd['vector_field'] = self.vector_field
+        dd['display_field'] = True
+        dd['not_in_all'] = self.not_in_all
+        dd['display_name'] = self.display_name
+        dd['projection_conversion'] = self.projection_conversion
+        return dd
+
     def check_available(self, data):
         """
         This raises an exception of the appropriate type if the set of


diff -r 97597f12e16394250ba004f098f1a2d4196ee991 -r 3e24db731586159b73dc2fbc931b33958f42aca2 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -490,6 +490,18 @@
         if handle is not None: handle.close()
         return set(fields)
 
+    def _setup_derived_fields(self):
+        super(EnzoHierarchy, self)._setup_derived_fields()
+        aps = self.parameter_file.parameters["AppendActiveParticleType"]
+        for fname, field in self.pf.field_info.items():
+            if not field.particle_type: continue
+            if isinstance(fname, tuple): continue
+            if field._function is NullFunc: continue
+            for apt in aps:
+                dd = field._copy_def()
+                dd.pop("name")
+                add_field((apt, fname), **dd)
+
     def _detect_fields(self):
         self.field_list = []
         # Do this only on the root processor to save disk work.

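Together, _copy_def and the new _setup_derived_fields replicate each particle field definition under every active particle type, so a field such as "particle_mass" also becomes addressable as (apt, "particle_mass"). (Note that in the _copy_def hunk above, dd['projected_units'] picks up a stray trailing comma, so it is stored as a one-element tuple.) A standalone sketch of the re-registration pattern, with an illustrative registry and type names rather than yt's actual API:

    # Hypothetical miniature of the pattern: copy a field definition's
    # keyword arguments and re-register it under a (type, name) key.
    field_registry = {}

    def add_field(name, **kwargs):
        field_registry[name] = kwargs

    add_field("particle_mass", units="g", particle_type=True)

    active_particle_types = ["CenOstriker"]      # assumed parameter value
    for fname, dd in list(field_registry.items()):
        if not dd.get("particle_type"):
            continue                             # fluid fields are left alone
        if isinstance(fname, tuple):
            continue                             # already type-qualified
        for apt in active_particle_types:
            add_field((apt, fname), **dd)        # ("CenOstriker", "particle_mass")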

diff -r 97597f12e16394250ba004f098f1a2d4196ee991 -r 3e24db731586159b73dc2fbc931b33958f42aca2 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -42,6 +42,11 @@
 
 import yt.utilities.lib as amr_utils
 
+def _check_ftype(field):
+    if isinstance(field.name, tuple):
+        return field.name[0]
+    return "all"
+
 EnzoFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo, "EFI")
 add_field = EnzoFieldInfo.add_field
 
@@ -491,30 +496,14 @@
 
 # Particle functions
 
-def particle_func(p_field, dtype='float64'):
-    def _Particles(field, data):
-        io = data.hierarchy.io
-        if not data.NumberOfParticles > 0:
-            return np.array([], dtype=dtype)
-        try:
-            return io._read_data_set(data, p_field).astype(dtype)
-        except io._read_exception:
-            pass
-        # This is bad.  But it's the best idea I have right now.
-        return data._read_data(p_field.replace("_"," ")).astype(dtype)
-    return _Particles
 for pf in ["type", "mass"] + \
           ["position_%s" % ax for ax in 'xyz']:
-    pfunc = particle_func("particle_%s" % (pf))
-    add_enzo_field("particle_%s" % pf, function=pfunc,
-              validators = [ValidateSpatial(0)],
-              particle_type=True)
+    add_enzo_field("particle_%s" % pf, NullFunc, particle_type=True)
     
 def _convRetainInt(data):
     return 1
-add_enzo_field("particle_index", function=particle_func("particle_index", "int64"),
-          validators = [ValidateSpatial(0)], particle_type=True,
-          convert_function=_convRetainInt)
+add_enzo_field("particle_index", function=NullFunc,
+          particle_type=True, convert_function=_convRetainInt)
 
 def _get_vel_convert(ax):
     def _convert_p_vel(data):
@@ -522,20 +511,15 @@
     return _convert_p_vel
 for ax in 'xyz':
     pf = "particle_velocity_%s" % ax
-    pfunc = particle_func(pf)
     cfunc = _get_vel_convert(ax)
-    add_enzo_field(pf, function=pfunc, convert_function=cfunc,
-              validators = [ValidateSpatial(0)],
+    add_enzo_field(pf, function=NullFunc, convert_function=cfunc,
               particle_type=True)
 
 for pf in ["creation_time", "dynamical_time", "metallicity_fraction"]:
-    pfunc = particle_func(pf)
-    add_enzo_field(pf, function=pfunc,
-              validators = [ValidateSpatial(0),
-                            ValidateDataField(pf)],
+    add_enzo_field(pf, function=NullFunc,
+              validators = [ValidateDataField(pf)],
               particle_type=True)
-add_field("particle_mass", function=particle_func("particle_mass"),
-          particle_type=True)
+add_field("particle_mass", function=NullFunc, particle_type=True)
 
 def _ParticleAge(field, data):
     current_time = data.pf.current_time
@@ -547,7 +531,8 @@
           particle_type=True, convert_function=_convertParticleAge)
 
 def _ParticleMass(field, data):
-    particles = data["particle_mass"].astype('float64') * \
+    ftype = _check_ftype(field)
+    particles = data[ftype, "particle_mass"].astype('float64') * \
                 just_one(data["CellVolumeCode"].ravel())
     # Note that we mandate grid-type here, so this is okay
     return particles
@@ -642,3 +627,8 @@
 add_enzo_1d_field("z-velocity", function=_zvel)
 add_enzo_1d_field("y-velocity", function=_yvel)
 
+for ax in 'xyz':
+    add_field(("CenOstriker","particle_position_%s" % ax),
+               function=TranslationFunc(("CenOstriker","position_%s" % ax)),
+               particle_type = True)
+


diff -r 97597f12e16394250ba004f098f1a2d4196ee991 -r 3e24db731586159b73dc2fbc931b33958f42aca2 yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -375,11 +375,12 @@
 
 class YTDataChunk(object):
 
-    def __init__(self, dobj, chunk_type, objs, data_size):
+    def __init__(self, dobj, chunk_type, objs, data_size, field_type = None):
         self.dobj = dobj
         self.chunk_type = chunk_type
         self.objs = objs
         self._data_size = data_size
+        self._field_type = field_type
 
     @property
     def data_size(self):



https://bitbucket.org/yt_analysis/yt-3.0/changeset/02f23d3ae10d/
changeset:   02f23d3ae10d
branch:      yt-3.0
user:        MatthewTurk
date:        2012-11-10 18:12:14
summary:     Initial implementation of a _current_particle_type and _current_fluid_type for
data containers.

Next step is to inherit from YTDataContainer for the GridPatch.
affected #:  5 files

diff -r 3e24db731586159b73dc2fbc931b33958f42aca2 -r 02f23d3ae10d731ee6a219c2baa878cbcc62b837 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -260,7 +260,7 @@
         convs = np.empty(len(fields), dtype="float64")
         fields = self._determine_fields(fields)
         for i, field in enumerate(fields):
-            fi = self._get_field_info(*field)
+            fi = self.pf._get_field_info(*field)
             convs[i] = (self.pf.units[fi.projection_conversion])
         return convs
 


diff -r 3e24db731586159b73dc2fbc931b33958f42aca2 -r 02f23d3ae10d731ee6a219c2baa878cbcc62b837 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -109,6 +109,8 @@
         if pf != None:
             self.pf = pf
             self.hierarchy = pf.hierarchy
+        self._current_particle_type = "all"
+        self._current_fluid_type = self.pf.default_fluid_type
         self.hierarchy.objects.append(weakref.proxy(self))
         mylog.debug("Appending object to %s (type: %s)", self.pf, type(self))
         self.field_data = YTFieldData()
@@ -212,13 +214,14 @@
 
     def _generate_field(self, field):
         ftype, fname = field
-        if fname in self._container_fields:
-            return self._generate_container_field(field)
-        finfo = self._get_field_info(*field)
-        if finfo.particle_type:
-            return self._generate_particle_field(field)
-        else:
-            return self._generate_fluid_field(field)
+        finfo = self.pf._get_field_info(*field)
+        with self._field_type_state(ftype, finfo):
+            if fname in self._container_fields:
+                return self._generate_container_field(field)
+            if finfo.particle_type:
+                return self._generate_particle_field(field)
+            else:
+                return self._generate_fluid_field(field)
 
     def _generate_fluid_field(self, field):
         # First we check the validator
@@ -257,7 +260,7 @@
         else:
             gen_obj = self._current_chunk.objs[0]
         try:
-            finfo = self._get_field_info(*field)
+            finfo = self.pf._get_field_info(*field)
             finfo.check_available(gen_obj)
         except NeedsGridType as ngt_exception:
             if ngt_exception.ghost_zones != 0:
@@ -278,7 +281,8 @@
                     rv[ind:ind+data.size] = data
                     ind += data.size
         else:
-            rv = self._get_field_info(*field)(gen_obj)
+            with self._field_type_state(ftype, finfo, gen_obj):
+                rv = self.pf._get_field_info(*field)(gen_obj)
         return rv
 
     def _count_particles(self, ftype):
@@ -349,13 +353,6 @@
                        for i in self._con_args])
         return s
 
-    def _get_field_info(self, ftype, fname):
-        if (ftype, fname) in self.pf.field_info:
-            return self.pf.field_info[(ftype, fname)]
-        if fname in self.pf.field_info:
-            return self.pf.field_info[fname]
-        raise YTFieldNotFound((fname, ftype), self.pf)
-
     def _determine_fields(self, fields):
         fields = ensure_list(fields)
         explicit_fields = []
@@ -369,14 +366,14 @@
                    not isinstance(field[1], types.StringTypes):
                     raise YTFieldNotParseable(field)
                 ftype, fname = field
-                finfo = self._get_field_info(ftype, fname)
+                finfo = self.pf._get_field_info(ftype, fname)
             else:
                 fname = field
-                finfo = self._get_field_info("unknown", fname)
+                finfo = self.pf._get_field_info("unknown", fname)
                 if finfo.particle_type:
-                    ftype = "all"
+                    ftype = self._current_particle_type
                 else:
-                    ftype = self.pf.default_fluid_type
+                    ftype = self._current_fluid_type
             if finfo.particle_type and ftype not in self.pf.particle_types:
                 raise YTFieldTypeNotFound(ftype)
             elif not finfo.particle_type and ftype not in self.pf.fluid_types:
@@ -443,7 +440,7 @@
         # We now split up into readers for the types of fields
         fluids, particles = [], []
         for ftype, fname in fields_to_get:
-            finfo = self._get_field_info(ftype, fname)
+            finfo = self.pf._get_field_info(ftype, fname)
             if finfo.particle_type:
                 particles.append((ftype, fname))
             elif (ftype, fname) not in fluids:
@@ -473,6 +470,23 @@
                             fields_to_generate.append(f)
 
     @contextmanager
+    def _field_type_state(self, ftype, finfo, obj = None):
+        if obj is None: obj = self
+        old_particle_type = obj._current_particle_type
+        mylog.debug("Old particle type: %s", old_particle_type)
+        old_fluid_type = obj._current_fluid_type
+        mylog.debug("Old fluid type: %s", old_fluid_type)
+        if finfo.particle_type:
+            obj._current_particle_type = ftype
+        else:
+            obj._current_fluid_type = ftype
+        mylog.debug("New particle type: %s", obj._current_particle_type)
+        mylog.debug("New fluid type: %s", obj._current_fluid_type)
+        yield
+        obj._current_particle_type = old_particle_type
+        obj._current_fluid_type = old_fluid_type
+
+    @contextmanager
     def _field_lock(self):
         self._locked = True
         yield

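The _field_type_state helper temporarily rebinds the container's current particle and fluid types while a field is generated, then restores the previous values. A minimal sketch of the same context-manager pattern (with a try/finally added so state is restored even on error, which the hunk above omits):

    from contextlib import contextmanager

    class ContainerSketch(object):
        _current_particle_type = "all"
        _current_fluid_type = "gas"

        @contextmanager
        def _field_type_state(self, ftype, particle_type):
            # Save, swap, yield, restore.
            old_p = self._current_particle_type
            old_f = self._current_fluid_type
            if particle_type:
                self._current_particle_type = ftype
            else:
                self._current_fluid_type = ftype
            try:
                yield
            finally:
                self._current_particle_type = old_p
                self._current_fluid_type = old_f

    c = ContainerSketch()
    with c._field_type_state("CenOstriker", particle_type=True):
        assert c._current_particle_type == "CenOstriker"
    assert c._current_particle_type == "all"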

diff -r 3e24db731586159b73dc2fbc931b33958f42aca2 -r 02f23d3ae10d731ee6a219c2baa878cbcc62b837 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -59,7 +59,8 @@
                  'start_index', 'filename', '__weakref__', 'dds',
                  '_child_mask', '_child_indices', '_child_index_mask',
                  '_parent_id', '_children_ids',
-                 '_last_mask', '_last_selector_id']
+                 '_last_mask', '_last_selector_id',
+                 '_current_particle_type', '_current_fluid_type']
 
     def __init__(self, id, filename=None, hierarchy=None):
         self.field_data = YTFieldData()
@@ -71,6 +72,8 @@
         self.start_index = None
         self._last_mask = None
         self._last_selector_id = None
+        self._current_particle_type = 'all'
+        self._current_fluid_type = self.pf.default_fluid_type
 
     def get_global_startindex(self):
         """


diff -r 3e24db731586159b73dc2fbc931b33958f42aca2 -r 02f23d3ae10d731ee6a219c2baa878cbcc62b837 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -244,6 +244,13 @@
         else:
             raise YTGeometryNotSupported(self.geometry)
 
+    def _get_field_info(self, ftype, fname):
+        if (ftype, fname) in self.field_info:
+            return self.field_info[(ftype, fname)]
+        if fname in self.field_info:
+            return self.field_info[fname]
+        raise YTFieldNotFound((fname, ftype), self)
+
 def _reconstruct_pf(*args, **kwargs):
     pfs = ParameterFileStore()
     pf = pfs.get_pf_hash(*args)


diff -r 3e24db731586159b73dc2fbc931b33958f42aca2 -r 02f23d3ae10d731ee6a219c2baa878cbcc62b837 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -531,8 +531,7 @@
           particle_type=True, convert_function=_convertParticleAge)
 
 def _ParticleMass(field, data):
-    ftype = _check_ftype(field)
-    particles = data[ftype, "particle_mass"].astype('float64') * \
+    particles = data["particle_mass"].astype('float64') * \
                 just_one(data["CellVolumeCode"].ravel())
     # Note that we mandate grid-type here, so this is okay
     return particles



https://bitbucket.org/yt_analysis/yt-3.0/changeset/9f3b6a38112f/
changeset:   9f3b6a38112f
branch:      yt-3.0
user:        MatthewTurk
date:        2012-11-10 19:55:28
summary:     Swapping out __slots__ in the grid patch for subclassing YTDataContainer.
affected #:  1 file

diff -r 02f23d3ae10d731ee6a219c2baa878cbcc62b837 -r 9f3b6a38112f5fc74d4f0e66fb6bc5ddb7843077 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -32,7 +32,7 @@
 from yt.funcs import *
 from yt.utilities.definitions import x_dict, y_dict
 
-from yt.data_objects.data_containers import YTFieldData
+from yt.data_objects.data_containers import YTFieldData, YTDataContainer
 from yt.utilities.definitions import x_dict, y_dict
 from .field_info_container import \
     NeedsGridType, \
@@ -42,7 +42,7 @@
     NeedsParameter
 from yt.geometry.selection_routines import convert_mask_to_indices
 
-class AMRGridPatch(object):
+class AMRGridPatch(YTDataContainer):
     _spatial = True
     _num_ghost_zones = 0
     _grids = None
@@ -53,15 +53,6 @@
     _con_args = ('id', 'filename')
     OverlappingSiblings = None
 
-    __slots__ = ['field_data', 'field_parameters', 'id', 'hierarchy', 'pf',
-                 'ActiveDimensions', 'LeftEdge', 'RightEdge', 'Level',
-                 'NumberOfParticles', 'Children', 'Parent',
-                 'start_index', 'filename', '__weakref__', 'dds',
-                 '_child_mask', '_child_indices', '_child_index_mask',
-                 '_parent_id', '_children_ids',
-                 '_last_mask', '_last_selector_id',
-                 '_current_particle_type', '_current_fluid_type']
-
     def __init__(self, id, filename=None, hierarchy=None):
         self.field_data = YTFieldData()
         self.field_parameters = {}



https://bitbucket.org/yt_analysis/yt-3.0/changeset/6e148fe0ec60/
changeset:   6e148fe0ec60
branch:      yt-3.0
user:        MatthewTurk
date:        2012-11-11 16:35:52
summary:     Partial success in getting particle data to read correctly.
affected #:  4 files

diff -r 9f3b6a38112f5fc74d4f0e66fb6bc5ddb7843077 -r 6e148fe0ec60717b773de4499fd973d14dcd4f73 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -353,6 +353,19 @@
                        for i in self._con_args])
         return s
 
+    @contextmanager
+    def _field_type_state(self, ftype, finfo, obj = None):
+        if obj is None: obj = self
+        old_particle_type = obj._current_particle_type
+        old_fluid_type = obj._current_fluid_type
+        if finfo.particle_type:
+            obj._current_particle_type = ftype
+        else:
+            obj._current_fluid_type = ftype
+        yield
+        obj._current_particle_type = old_particle_type
+        obj._current_fluid_type = old_fluid_type
+
     def _determine_fields(self, fields):
         fields = ensure_list(fields)
         explicit_fields = []
@@ -470,23 +483,6 @@
                             fields_to_generate.append(f)
 
     @contextmanager
-    def _field_type_state(self, ftype, finfo, obj = None):
-        if obj is None: obj = self
-        old_particle_type = obj._current_particle_type
-        mylog.debug("Old particle type: %s", old_particle_type)
-        old_fluid_type = obj._current_fluid_type
-        mylog.debug("Old fluid type: %s", old_fluid_type)
-        if finfo.particle_type:
-            obj._current_particle_type = ftype
-        else:
-            obj._current_fluid_type = ftype
-        mylog.debug("New particle type: %s", obj._current_particle_type)
-        mylog.debug("New fluid type: %s", obj._current_fluid_type)
-        yield
-        obj._current_particle_type = old_particle_type
-        obj._current_fluid_type = old_fluid_type
-
-    @contextmanager
     def _field_lock(self):
         self._locked = True
         yield


diff -r 9f3b6a38112f5fc74d4f0e66fb6bc5ddb7843077 -r 6e148fe0ec60717b773de4499fd973d14dcd4f73 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -26,7 +26,7 @@
 import exceptions
 import pdb
 import weakref
-
+import itertools
 import numpy as np
 
 from yt.funcs import *
@@ -85,29 +85,6 @@
         self.start_index = (start_index * self.pf.refine_by).astype('int64').ravel()
         return self.start_index
 
-    def get_field_parameter(self, name, default=None):
-        """
-        This is typically only used by derived field functions, but it returns
-        parameters used to generate fields.
-
-        """
-        if self.field_parameters.has_key(name):
-            return self.field_parameters[name]
-        else:
-            return default
-
-    def set_field_parameter(self, name, val):
-        """
-        Here we set up dictionaries that get passed up and down and ultimately
-        to derived fields.
-
-        """
-        self.field_parameters[name] = val
-
-    def has_field_parameter(self, name):
-        """ Checks if a field parameter is set. """
-        return self.field_parameters.has_key(name)
-
     def convert(self, datatype):
         """
         This will attempt to convert a given unit to cgs from code units. It
@@ -116,87 +93,71 @@
         """
         return self.pf[datatype]
 
-    def __repr__(self):
-        # We'll do this the slow way to be clear what's going on
-        s = "%s (%s): " % (self.__class__.__name__, self.pf)
-        s += ", ".join(["%s=%s" % (i, getattr(self,i))
-                        for i in self._con_args])
-        return s
+    def _generate_field(self, field):
+        ftype, fname = field
+        finfo = self.pf._get_field_info(*field)
+        with self._field_type_state(ftype, finfo):
+            if fname in self._container_fields:
+                return self._generate_container_field(field)
+            if finfo.particle_type:
+                return self._generate_particle_field(field)
+            else:
+                return self._generate_fluid_field(field)
 
-    def _generate_field(self, field):
-        if self.pf.field_info.has_key(field):
-            # First we check the validator
-            try:
-                self.pf.field_info[field].check_available(self)
-            except NeedsGridType, ngt_exception:
-                # This is only going to be raised if n_gz > 0
-                n_gz = ngt_exception.ghost_zones
-                f_gz = ngt_exception.fields
-                if f_gz is None:
-                    f_gz = self.pf.field_info[field].get_dependencies(
-                            pf = self.pf).requested
-                gz_grid = self.retrieve_ghost_zones(n_gz, f_gz, smoothed=True)
-                temp_array = self.pf.field_info[field](gz_grid)
-                sl = [slice(n_gz, -n_gz)] * 3
-                self[field] = temp_array[sl]
-            else:
-                self[field] = self.pf.field_info[field](self)
-        else: # Can't find the field, try as it might
-            raise exceptions.KeyError(field)
+    def _generate_fluid_field(self, field):
+        ftype, fname = field
+        # First we check the validator
+        try:
+            self.pf.field_info[field].check_available(self)
+        except NeedsGridType, ngt_exception:
+            # This is only going to be raised if n_gz > 0
+            n_gz = ngt_exception.ghost_zones
+            f_gz = ngt_exception.fields
+            if f_gz is None:
+                f_gz = self.pf.field_info[field].get_dependencies(
+                        pf = self.pf).requested
+            gz_grid = self.retrieve_ghost_zones(n_gz, f_gz, smoothed=True)
+            temp_array = self.pf.field_info[field](gz_grid)
+            sl = [slice(n_gz, -n_gz)] * 3
+            self[field] = temp_array[sl]
+        else:
+            self[field] = self.pf.field_info[field](self)
 
-    def has_key(self, key):
-        return (key in self.field_data)
+    def _generate_particle_field(self, field):
+        ftype, fname = field
+        finfo = self.pf._get_field_info(*field)
+        finfo.check_available(self)
+        with self._field_type_state(ftype, finfo, self):
+            rv = self.pf._get_field_info(*field)(self)
+        return rv
 
-    def __getitem__(self, key):
-        """
-        Returns a single field.  Will add if necessary.
-        """
-        if not self.field_data.has_key(key):
-            self.get_data(key)
-        return self.field_data[key]
-
-    def __setitem__(self, key, val):
-        """
-        Sets a field to be some other value.
-        """
-        self.field_data[key] = val
-
-    def __delitem__(self, key):
-        """
-        Deletes a field
-        """
-        del self.field_data[key]
-
-    def keys(self):
-        return self.field_data.keys()
-
-    def get_data(self, field, convert = True):
+    def get_data(self, fields = None, convert = True):
         """
         Returns a field or set of fields for a key or set of keys
         """
-        if not self.field_data.has_key(field):
-            if field in self.hierarchy.field_list:
+        if fields is None: return
+        fields = self._determine_fields(fields)
+        fields_to_get = [f for f in fields if f not in self.field_data]
+        if len(fields_to_get) == 0:
+            return
+        inspected = 0
+        for field in itertools.cycle(fields_to_get):
+            ftype, fname = field
+            finfo = self.pf._get_field_info(ftype, fname)
+            if fname in self.hierarchy.field_list or \
+               (ftype, fname) in self.hierarchy.field_list:
                 conv_factor = 1.0
-                if self.pf.field_info.has_key(field) and convert == True:
-                    conv_factor = self.pf.field_info[field]._convert_function(self)
-                if self.pf.field_info[field].particle_type and \
-                   self.NumberOfParticles == 0:
-                    # because this gets upcast to float
-                    self[field] = np.array([],dtype='int64')
-                    return self.field_data[field]
-                try:
-                    temp = self.hierarchy.io.pop(self, field)
-                    self[field] = np.multiply(temp, conv_factor, temp)
-                except self.hierarchy.io._read_exception, exc:
-                    if field in self.pf.field_info:
-                        if self.pf.field_info[field].not_in_all:
-                            self[field] = np.zeros(self.ActiveDimensions, dtype='float64')
-                        else:
-                            raise
-                    else: raise
+                if convert == True:
+                    conv_factor = finfo._convert_function(self)
+                if finfo.particle_type:
+                    temp = self.hierarchy.io._read_particle_data_by_type(
+                            self, (ftype, fname))
+                else:
+                    temp = self.hierarchy.io.pop(self, fieldfname)
+                mylog.debug("Setting %s (%s)", field, self)
+                self[field] = np.multiply(temp, conv_factor, temp)
             else:
                 self._generate_field(field)
-        return self.field_data[field]
 
     def _setup_dx(self):
         # So first we figure out what the index is.  We don't assume
@@ -256,9 +217,9 @@
         all field parameters.
 
         """
+        super(AMRGridPatch, self).clear_data()
         self._del_child_mask()
         self._del_child_indices()
-        self.field_data.clear()
         self._setup_dx()
 
     def check_child_masks(self):
@@ -314,10 +275,6 @@
             del self.retVal
         self.field_data = YTFieldData()
         self.clear_derived_quantities()
-
-    def clear_derived_quantities(self):
-        """ Clears coordinates, child_indices, child_mask. """
-        # Access the property raw-values here
         del self.child_mask
         del self.child_ind
 


diff -r 9f3b6a38112f5fc74d4f0e66fb6bc5ddb7843077 -r 6e148fe0ec60717b773de4499fd973d14dcd4f73 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -46,6 +46,13 @@
         handle.close()
         return tr.swapaxes(0, 2)
 
+    def _read_particle_data_by_type(self, grid, field):
+        handle = h5py.File(grid.filename)
+        ftype, fname = field
+        tr = handle["/Grid%08i/%s/%s/%s" % (grid.id, field, ftype, fname)][:]
+        handle.close()
+        return tr
+
     def _read_field_names(self, grid):
         return hdf5_light_reader.ReadListOfDatasets(
                     grid.filename, "/Grid%08i" % grid.id)


diff -r 9f3b6a38112f5fc74d4f0e66fb6bc5ddb7843077 -r 6e148fe0ec60717b773de4499fd973d14dcd4f73 yt/geometry/grid_geometry_handler.py
--- a/yt/geometry/grid_geometry_handler.py
+++ b/yt/geometry/grid_geometry_handler.py
@@ -194,7 +194,9 @@
             gi = dobj.selector.select_grids(self.grid_left_edge,
                                             self.grid_right_edge)
             grids = list(sorted(self.grids[gi], key = lambda g: g.filename))
-            dobj._chunk_info = np.array(grids, dtype='object')
+            dobj._chunk_info = np.empty(len(grids), dtype='object')
+            for i, g in enumerate(grids):
+                dobj._chunk_info[i] = g
         if getattr(dobj, "size", None) is None:
             dobj.size = self._count_selection(dobj)
             dobj.shape = (dobj.size,)



https://bitbucket.org/yt_analysis/yt-3.0/changeset/3116db522333/
changeset:   3116db522333
branch:      yt-3.0
user:        MatthewTurk
date:        2012-11-11 16:45:51
summary:     By swapping our grid data back into a selector object, we can go a bit faster.
This may turn out to be a failed experiment.
affected #:  3 files

diff -r 6e148fe0ec60717b773de4499fd973d14dcd4f73 -r 3116db5223330471d814d8b9ad79a827d9e0428a yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -32,7 +32,10 @@
 from yt.funcs import *
 from yt.utilities.definitions import x_dict, y_dict
 
-from yt.data_objects.data_containers import YTFieldData, YTDataContainer
+from yt.data_objects.data_containers import \
+    YTFieldData, \
+    YTDataContainer, \
+    YTSelectionContainer
 from yt.utilities.definitions import x_dict, y_dict
 from .field_info_container import \
     NeedsGridType, \
@@ -42,7 +45,7 @@
     NeedsParameter
 from yt.geometry.selection_routines import convert_mask_to_indices
 
-class AMRGridPatch(YTDataContainer):
+class AMRGridPatch(YTSelectionContainer):
     _spatial = True
     _num_ghost_zones = 0
     _grids = None
@@ -93,72 +96,6 @@
         """
         return self.pf[datatype]
 
-    def _generate_field(self, field):
-        ftype, fname = field
-        finfo = self.pf._get_field_info(*field)
-        with self._field_type_state(ftype, finfo):
-            if fname in self._container_fields:
-                return self._generate_container_field(field)
-            if finfo.particle_type:
-                return self._generate_particle_field(field)
-            else:
-                return self._generate_fluid_field(field)
-
-    def _generate_fluid_field(self, field):
-        ftype, fname = field
-        # First we check the validator
-        try:
-            self.pf.field_info[field].check_available(self)
-        except NeedsGridType, ngt_exception:
-            # This is only going to be raised if n_gz > 0
-            n_gz = ngt_exception.ghost_zones
-            f_gz = ngt_exception.fields
-            if f_gz is None:
-                f_gz = self.pf.field_info[field].get_dependencies(
-                        pf = self.pf).requested
-            gz_grid = self.retrieve_ghost_zones(n_gz, f_gz, smoothed=True)
-            temp_array = self.pf.field_info[field](gz_grid)
-            sl = [slice(n_gz, -n_gz)] * 3
-            self[field] = temp_array[sl]
-        else:
-            self[field] = self.pf.field_info[field](self)
-
-    def _generate_particle_field(self, field):
-        ftype, fname = field
-        finfo = self.pf._get_field_info(*field)
-        finfo.check_available(self)
-        with self._field_type_state(ftype, finfo, self):
-            rv = self.pf._get_field_info(*field)(self)
-        return rv
-
-    def get_data(self, fields = None, convert = True):
-        """
-        Returns a field or set of fields for a key or set of keys
-        """
-        if fields is None: return
-        fields = self._determine_fields(fields)
-        fields_to_get = [f for f in fields if f not in self.field_data]
-        if len(fields_to_get) == 0:
-            return
-        inspected = 0
-        for field in itertools.cycle(fields_to_get):
-            ftype, fname = field
-            finfo = self.pf._get_field_info(ftype, fname)
-            if fname in self.hierarchy.field_list or \
-               (ftype, fname) in self.hierarchy.field_list:
-                conv_factor = 1.0
-                if convert == True:
-                    conv_factor = finfo._convert_function(self)
-                if finfo.particle_type:
-                    temp = self.hierarchy.io._read_particle_data_by_type(
-                            self, (ftype, fname))
-                else:
-                    temp = self.hierarchy.io.pop(self, fieldfname)
-                mylog.debug("Setting %s (%s)", field, self)
-                self[field] = np.multiply(temp, conv_factor, temp)
-            else:
-                self._generate_field(field)
-
     def _setup_dx(self):
         # So first we figure out what the index is.  We don't assume
         # that dx=dy=dz, at least here.  We probably do elsewhere.
@@ -186,10 +123,6 @@
             [self.LeftEdge[0],  self.RightEdge[1], self.LeftEdge[2]],
             ], dtype='float64')
 
-    @property
-    def shape(self):
-        return tuple(self.ActiveDimensions)
-
     def _generate_overlap_masks(self, axis, LE, RE):
         """
         Generate a mask that shows which cells overlap with arbitrary arrays


diff -r 6e148fe0ec60717b773de4499fd973d14dcd4f73 -r 3116db5223330471d814d8b9ad79a827d9e0428a yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -322,7 +322,7 @@
                     fields_to_read)
         for field in fields_to_read:
             ftype, fname = field
-            finfo = dobj._get_field_info(*field)
+            finfo = self.pf._get_field_info(*field)
             conv_factor = finfo._convert_function(self)
             np.multiply(fields_to_return[field], conv_factor,
                         fields_to_return[field])


diff -r 6e148fe0ec60717b773de4499fd973d14dcd4f73 -r 3116db5223330471d814d8b9ad79a827d9e0428a yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -943,3 +943,45 @@
 
 ellipsoid_selector = EllipsoidSelector
 
+cdef class GridSelector(SelectorObject):
+    cdef object ind
+
+    def __init__(self, dobj):
+        self.ind = dobj.id - dobj._id_offset
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef void set_bounds(self,
+                         np.float64_t left_edge[3], np.float64_t right_edge[3],
+                         np.float64_t dds[3], int ind[3][2], int *check):
+        check[0] = 0
+        return
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def select_grids(self,
+                     np.ndarray[np.float64_t, ndim=2] left_edges,
+                     np.ndarray[np.float64_t, ndim=2] right_edges):
+        cdef int ng = left_edges.shape[0]
+        cdef np.ndarray[np.uint8_t, ndim=1] gridi = np.zeros(ng, dtype='uint8')
+        gridi[self.ind] = 1
+        return gridi.astype("bool")
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def count_cells(self, gobj):
+        return gobj.ActiveDimensions.prod()
+    
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def fill_mask(self, gobj):
+        cdef np.ndarray[np.uint8_t, ndim=3] mask 
+        mask = np.ones(gobj.ActiveDimensions, dtype='uint8')
+        return mask.astype("bool")
+
+grid_selector = GridSelector
+

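GridSelector short-circuits selection when the data object is itself a single grid: select_grids flags exactly one grid, and every cell of that grid is accepted. A pure-Python sketch of the same behavior (the Cython decorators and set_bounds are omitted here):

    import numpy as np

    class GridSelectorSketch(object):
        """Select exactly one grid, and all of its cells."""
        def __init__(self, grid_index):
            self.ind = grid_index

        def select_grids(self, left_edges, right_edges):
            gridi = np.zeros(left_edges.shape[0], dtype=bool)
            gridi[self.ind] = True           # only the grid itself matches
            return gridi

        def count_cells(self, gobj):
            return int(np.prod(gobj.ActiveDimensions))

        def fill_mask(self, gobj):
            # Every cell of the grid is inside the selection.
            return np.ones(gobj.ActiveDimensions, dtype=bool)

    sel = GridSelectorSketch(5)
    edges = np.zeros((8, 3))
    assert sel.select_grids(edges, edges).sum() == 1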


https://bitbucket.org/yt_analysis/yt-3.0/changeset/8f8cc60a5267/
changeset:   8f8cc60a5267
branch:      yt-3.0
user:        MatthewTurk
date:        2012-11-11 16:50:12
summary:     Remove old data IO functions and select all cells in a grid selector.
affected #:  2 files

diff -r 3116db5223330471d814d8b9ad79a827d9e0428a -r 8f8cc60a526733384cb4c8c0100c835626dae1e2 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -40,19 +40,6 @@
     _data_style = "enzo_packed_3d"
     _base = slice(None)
 
-    def _read_data_set(self, grid, field):
-        handle = h5py.File(grid.filename)
-        tr = handle["/Grid%08i/%s" % (grid.id, field)][:]
-        handle.close()
-        return tr.swapaxes(0, 2)
-
-    def _read_particle_data_by_type(self, grid, field):
-        handle = h5py.File(grid.filename)
-        ftype, fname = field
-        tr = handle["/Grid%08i/%s/%s/%s" % (grid.id, field, ftype, fname)][:]
-        handle.close()
-        return tr
-
     def _read_field_names(self, grid):
         return hdf5_light_reader.ReadListOfDatasets(
                     grid.filename, "/Grid%08i" % grid.id)


diff -r 3116db5223330471d814d8b9ad79a827d9e0428a -r 8f8cc60a526733384cb4c8c0100c835626dae1e2 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -983,5 +983,13 @@
         mask = np.ones(gobj.ActiveDimensions, dtype='uint8')
         return mask.astype("bool")
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef int select_cell(self, np.float64_t pos[3], np.float64_t dds[3],
+                         int eterm[3]) nogil:
+        return 1
+
+
 grid_selector = GridSelector
 



https://bitbucket.org/yt_analysis/yt-3.0/changeset/494c719a068b/
changeset:   494c719a068b
branch:      yt-3.0
user:        MatthewTurk
date:        2012-11-11 17:12:34
summary:     Attempt to make grid objects work better for field data and as selection
containers.
affected #:  2 files

diff -r 8f8cc60a526733384cb4c8c0100c835626dae1e2 -r 494c719a068bc1c7fd54365631ad3f1db7769cf1 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -54,6 +54,7 @@
     _type_name = 'grid'
     _skip_add = True
     _con_args = ('id', 'filename')
+    _container_fields = ("dx", "dy", "dz")
     OverlappingSiblings = None
 
     def __init__(self, id, filename=None, hierarchy=None):
@@ -88,6 +89,17 @@
         self.start_index = (start_index * self.pf.refine_by).astype('int64').ravel()
         return self.start_index
 
+    def __getitem__(self, key):
+        tr = super(AMRGridPatch, self).__getitem__(key)
+        try:
+            fields = self._determine_fields(key)
+        except YTFieldTypeNotFound:
+            return tr
+        finfo = self.pf._get_field_info(*fields[0])
+        if not finfo.particle_type:
+            return tr.reshape(self.ActiveDimensions)
+        return tr
+
     def convert(self, datatype):
         """
         This will attempt to convert a given unit to cgs from code units. It
@@ -96,6 +108,20 @@
         """
         return self.pf[datatype]
 
+    @property
+    def shape(self):
+        return self.ActiveDimensions
+
+    def _generate_container_field(self, field):
+        if self._current_chunk is None:
+            self.hierarchy._identify_base_chunk(self)
+        if field == "dx":
+            return self._current_chunk.fwidth[:,0]
+        elif field == "dy":
+            return self._current_chunk.fwidth[:,1]
+        elif field == "dz":
+            return self._current_chunk.fwidth[:,2]
+
     def _setup_dx(self):
         # So first we figure out what the index is.  We don't assume
         # that dx=dy=dz, at least here.  We probably do elsewhere.
@@ -108,7 +134,6 @@
             self.dds = np.array((RE - LE) / self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = self.pf.domain_right_edge[1] - self.pf.domain_left_edge[1]
         if self.pf.dimensionality < 3: self.dds[2] = self.pf.domain_right_edge[2] - self.pf.domain_left_edge[2]
-        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     @property
     def _corners(self):
@@ -173,9 +198,6 @@
         #self.Time = h.gridTimes[my_ind,0]
         self.NumberOfParticles = h.grid_particle_count[my_ind, 0]
 
-    def __len__(self):
-        return np.prod(self.ActiveDimensions)
-
     def find_max(self, field):
         """ Returns value, index of maximum value of *field* in this grid. """
         coord1d = (self[field] * self.child_mask).argmax()

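The new __getitem__ above returns fluid fields reshaped to the grid's 3-D ActiveDimensions while particle fields stay one-dimensional. A sketch, assuming the base class hands back flat arrays and that field metadata distinguishes particle fields (the heuristic below is illustrative only):

    import numpy as np

    class GridItemSketch(object):
        ActiveDimensions = np.array([4, 4, 4])

        def _flat_lookup(self, key):
            # Stand-in for the base-class __getitem__, which returns the
            # selected values as a flat float64 buffer.
            return np.zeros(int(np.prod(self.ActiveDimensions)))

        def _is_particle_field(self, key):
            return key.startswith("particle_")   # illustrative heuristic

        def __getitem__(self, key):
            tr = self._flat_lookup(key)
            if self._is_particle_field(key):
                return tr                             # particles stay 1-D
            return tr.reshape(self.ActiveDimensions)  # fluids become 3-D

    g = GridItemSketch()
    assert g["Density"].shape == (4, 4, 4)
    assert g["particle_mass"].ndim == 1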

diff -r 8f8cc60a526733384cb4c8c0100c835626dae1e2 -r 494c719a068bc1c7fd54365631ad3f1db7769cf1 yt/geometry/grid_geometry_handler.py
--- a/yt/geometry/grid_geometry_handler.py
+++ b/yt/geometry/grid_geometry_handler.py
@@ -199,6 +199,7 @@
                 dobj._chunk_info[i] = g
         if getattr(dobj, "size", None) is None:
             dobj.size = self._count_selection(dobj)
+        if getattr(dobj, "shape", None) is None:
             dobj.shape = (dobj.size,)
         dobj._current_chunk = list(self._chunk_all(dobj))[0]
 



https://bitbucket.org/yt_analysis/yt-3.0/changeset/2d9732e93c41/
changeset:   2d9732e93c41
branch:      yt-3.0
user:        MatthewTurk
date:        2012-11-11 20:20:07
summary:     By splitting the fluid field reading into a couple different routines, we can
speed up IO for grid fields by ~5x.
affected #:  4 files

diff -r 494c719a068bc1c7fd54365631ad3f1db7769cf1 -r 2d9732e93c41fcbf33579637353d8db9c71974d6 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -924,14 +924,14 @@
         self.global_endindex = None
         YTCoveringGridBase.__init__(self, *args, **kwargs)
         self._final_start_index = self.global_startindex
-
-    def _get_list_of_grids(self):
-        if self._grids is not None: return
+        # We need a buffer region to allow for zones that contribute to the
+        # interpolation but are not directly inside our bounds
         buffer = ((self.pf.domain_right_edge - self.pf.domain_left_edge)
                  / self.pf.domain_dimensions).max()
-        YTCoveringGridBase._get_list_of_grids(self, buffer)
-        # We reverse the order to ensure that coarse grids are first
-        self._grids = self._grids[::-1]
+        self._base_region = self.pf.geometry.region(
+            self.center,
+            self.left_edge - buffer,
+            self.right_edge + buffer)
 
     def get_data(self, field):
         self._get_list_of_grids()


diff -r 494c719a068bc1c7fd54365631ad3f1db7769cf1 -r 2d9732e93c41fcbf33579637353d8db9c71974d6 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -492,7 +492,8 @@
 
     def _setup_derived_fields(self):
         super(EnzoHierarchy, self)._setup_derived_fields()
-        aps = self.parameter_file.parameters["AppendActiveParticleType"]
+        aps = self.parameter_file.parameters.get(
+            "AppendActiveParticleType", [])
         for fname, field in self.pf.field_info.items():
             if not field.particle_type: continue
             if isinstance(fname, tuple): continue


diff -r 494c719a068bc1c7fd54365631ad3f1db7769cf1 -r 2d9732e93c41fcbf33579637353d8db9c71974d6 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -136,18 +136,20 @@
         
     def _read_fluid_selection(self, chunks, selector, fields, size):
         rv = {}
+        # Now we have to do something unpleasant
+        chunks = list(chunks)
+        if selector.__class__.__name__ == "GridSelector":
+            return self._read_grid_chunk(chunks, fields)
         if any((ftype != "gas" for ftype, fname in fields)):
             raise NotImplementedError
-        # Now we have to do something unpleasant
-        chunks = list(chunks)
         for field in fields:
             ftype, fname = field
             fsize = size
             rv[field] = np.empty(fsize, dtype="float64")
-        ind = 0
         ng = sum(len(c.objs) for c in chunks)
         mylog.debug("Reading %s cells of %s fields in %s grids",
                    size, [f2 for f1, f2 in fields], ng)
+        ind = 0
         for chunk in chunks:
             data = self._read_chunk_data(chunk, fields)
             for g in chunk.objs:
@@ -161,6 +163,15 @@
                 data.pop(g.id)
         return rv
 
+    def _read_grid_chunk(self, chunks, fields):
+        sets = [fname for ftype, fname in fields]
+        g = chunks[0].objs[0]
+        rv = hdf5_light_reader.ReadMultipleGrids(
+            g.filename, [g.id], sets, "")[g.id]
+        for ftype, fname in fields:
+            rv[(ftype, fname)] = rv.pop(fname).swapaxes(0,2)
+        return rv
+
     def _read_chunk_data(self, chunk, fields, filter_particles = False,
                          suffix = ""):
         data = {}


diff -r 494c719a068bc1c7fd54365631ad3f1db7769cf1 -r 2d9732e93c41fcbf33579637353d8db9c71974d6 yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -303,18 +303,23 @@
         obj = type(class_name, (base,), dd)
         setattr(self, name, obj)
 
+    def _split_fields(self, fields):
+        # This will split fields into either generated or read fields
+        fields_to_read, fields_to_generate = [], []
+        for ftype, fname in fields:
+            if fname in self.field_list or (ftype, fname) in self.field_list:
+                fields_to_read.append((ftype, fname))
+            else:
+                fields_to_generate.append((ftype, fname))
+        return fields_to_read, fields_to_generate
+
     def _read_particle_fields(self, fields, dobj, chunk = None):
         if len(fields) == 0: return {}, []
         selector = dobj.selector
         if chunk is None:
             self._identify_base_chunk(dobj)
         fields_to_return = {}
-        fields_to_read, fields_to_generate = [], []
-        for ftype, fname in fields:
-            if fname in self.field_list or (ftype, fname) in self.field_list:
-                fields_to_read.append((ftype, fname))
-            else:
-                fields_to_generate.append((ftype, fname))
+        fields_to_read, fields_to_generate = self._split_fields(fields)
         if len(fields_to_read) == 0:
             return {}, fields_to_generate
         fields_to_return = self.io._read_particle_selection(
@@ -337,12 +342,7 @@
         else:
             chunk_size = chunk.data_size
         fields_to_return = {}
-        fields_to_read, fields_to_generate = [], []
-        for ftype, fname in fields:
-            if fname in self.field_list:
-                fields_to_read.append((ftype, fname))
-            else:
-                fields_to_generate.append((ftype, fname))
+        fields_to_read, fields_to_generate = self._split_fields(fields)
         if len(fields_to_read) == 0:
             return {}, fields_to_generate
         fields_to_return = self.io._read_fluid_selection(self._chunk_io(dobj),

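_split_fields factors the read-versus-generate partition out of both the particle and fluid read paths (and the fluid path now also honors (ftype, fname) keys in field_list). A standalone sketch of the partition:

    def split_fields(fields, field_list):
        # Partition (ftype, fname) pairs into on-disk fields and fields
        # that must be produced by the derived-field machinery.
        fields_to_read, fields_to_generate = [], []
        for ftype, fname in fields:
            if fname in field_list or (ftype, fname) in field_list:
                fields_to_read.append((ftype, fname))
            else:
                fields_to_generate.append((ftype, fname))
        return fields_to_read, fields_to_generate

    on_disk = {"Density"}
    read, gen = split_fields([("gas", "Density"), ("gas", "Entropy")], on_disk)
    # read == [("gas", "Density")]; gen == [("gas", "Entropy")]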


https://bitbucket.org/yt_analysis/yt-3.0/changeset/b5e0b68ab7b5/
changeset:   b5e0b68ab7b5
branch:      yt-3.0
user:        MatthewTurk
date:        2012-11-11 21:34:42
summary:     More optimizations for getting grid IO to be speed-competitive.
affected #:  3 files

diff -r 2d9732e93c41fcbf33579637353d8db9c71974d6 -r b5e0b68ab7b51d7e07c32640d7712483c32e4ddf yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -244,11 +244,20 @@
         else:
             raise YTGeometryNotSupported(self.geometry)
 
+    _last_freq = None
+    _last_finfo = None
     def _get_field_info(self, ftype, fname):
-        if (ftype, fname) in self.field_info:
-            return self.field_info[(ftype, fname)]
+        field = (ftype, fname)
+        if field == self._last_freq:
+            return self._last_finfo
+        if field in self.field_info:
+            self._last_freq = field
+            self._last_finfo = self.field_info[(ftype, fname)]
+            return self._last_finfo
         if fname in self.field_info:
-            return self.field_info[fname]
+            self._last_freq = field
+            self._last_finfo = self.field_info[fname]
+            return self._last_finfo
         raise YTFieldNotFound((fname, ftype), self)
 
 def _reconstruct_pf(*args, **kwargs):

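Caching the most recent lookup turns the common case -- many consecutive accesses to the same field -- into a single tuple comparison. A minimal sketch of the one-entry memoization (a later changeset below also short-circuits on a repeated bare fname):

    class FieldLookupSketch(object):
        _last_freq = (None, None)     # last (ftype, fname) requested
        _last_finfo = None

        def __init__(self, field_info):
            # field_info: dict keyed by either fname or (ftype, fname)
            self.field_info = field_info

        def _get_field_info(self, ftype, fname):
            field = (ftype, fname)
            if field == self._last_freq:
                return self._last_finfo        # hot path: repeat lookup
            if field in self.field_info:
                self._last_freq = field
                self._last_finfo = self.field_info[field]
                return self._last_finfo
            if fname in self.field_info:       # fall back to untyped key
                self._last_freq = field
                self._last_finfo = self.field_info[fname]
                return self._last_finfo
            raise KeyError(field)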

diff -r 2d9732e93c41fcbf33579637353d8db9c71974d6 -r b5e0b68ab7b51d7e07c32640d7712483c32e4ddf yt/geometry/grid_geometry_handler.py
--- a/yt/geometry/grid_geometry_handler.py
+++ b/yt/geometry/grid_geometry_handler.py
@@ -190,7 +190,10 @@
         return self.parameter_file.conversion_factors[unit]
 
     def _identify_base_chunk(self, dobj):
-        if getattr(dobj, "_grids", None) is None:
+        if dobj._type_name == "grid":
+            dobj._chunk_info = np.empty(1, dtype='object')
+            dobj._chunk_info[0] = dobj
+        elif getattr(dobj, "_grids", None) is None:
             gi = dobj.selector.select_grids(self.grid_left_edge,
                                             self.grid_right_edge)
             grids = list(sorted(self.grids[gi], key = lambda g: g.filename))


diff -r 2d9732e93c41fcbf33579637353d8db9c71974d6 -r b5e0b68ab7b51d7e07c32640d7712483c32e4ddf yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -979,9 +979,7 @@
     @cython.wraparound(False)
     @cython.cdivision(True)
     def fill_mask(self, gobj):
-        cdef np.ndarray[np.uint8_t, ndim=3] mask 
-        mask = np.ones(gobj.ActiveDimensions, dtype='uint8')
-        return mask.astype("bool")
+        return None
 
     @cython.boundscheck(False)
     @cython.wraparound(False)



https://bitbucket.org/yt_analysis/yt-3.0/changeset/2ac10177cbf6/
changeset:   2ac10177cbf6
branch:      yt-3.0
user:        MatthewTurk
date:        2012-11-12 00:30:53
summary:     A few more optimizations.  Now dd[Something] is 30% faster than 2.x.
affected #:  3 files

diff -r b5e0b68ab7b51d7e07c32640d7712483c32e4ddf -r 2ac10177cbf6618c84965a84e00f1c7b829ea0f9 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -244,11 +244,11 @@
         else:
             raise YTGeometryNotSupported(self.geometry)
 
-    _last_freq = None
+    _last_freq = (None, None)
     _last_finfo = None
     def _get_field_info(self, ftype, fname):
         field = (ftype, fname)
-        if field == self._last_freq:
+        if field == self._last_freq or fname == self._last_freq[1]:
             return self._last_finfo
         if field in self.field_info:
             self._last_freq = field


diff -r b5e0b68ab7b51d7e07c32640d7712483c32e4ddf -r 2ac10177cbf6618c84965a84e00f1c7b829ea0f9 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -30,6 +30,7 @@
 from yt.utilities.io_handler import \
     BaseIOHandler, _axis_ids
 from yt.utilities.logger import ytLogger as mylog
+from yt.geometry.selection_routines import mask_fill
 import h5py
 
 import numpy as np
@@ -155,11 +156,12 @@
             for g in chunk.objs:
                 mask = g.select(selector)
                 if mask is None: continue
+                nd = mask.sum()
                 for field in fields:
                     ftype, fname = field
-                    gdata = data[g.id].pop(fname).swapaxes(0,2)[mask]
-                    rv[field][ind:ind+gdata.size] = gdata
-                ind += gdata.size
+                    gdata = data[g.id].pop(fname).swapaxes(0,2)
+                    nd = mask_fill(rv[field], ind, mask, gdata)
+                ind += nd
                 data.pop(g.id)
         return rv
 


diff -r b5e0b68ab7b51d7e07c32640d7712483c32e4ddf -r 2ac10177cbf6618c84965a84e00f1c7b829ea0f9 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -81,6 +81,24 @@
                     cpos += 1
     return indices
 
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def mask_fill(np.ndarray[np.float64_t, ndim=1] out,
+              np.int64_t offset,
+              np.ndarray[np.uint8_t, ndim=3, cast=True] mask,
+              np.ndarray[np.float64_t, ndim=3] vals):
+    cdef np.int64_t count = 0
+    cdef int i, j, k
+    for i in range(mask.shape[0]):
+        for j in range(mask.shape[1]):
+            for k in range(mask.shape[2]):
+                if mask[i,j,k] == 1:
+                    out[offset + count] = vals[i,j,k]
+                    count += 1
+    return count
+
 # Inclined Box
 
 cdef class SelectorObject:

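mask_fill packs the selected cells of a 3-D grid buffer into a flat output array at a running offset, returning the count so the caller can advance its index across grids. In NumPy terms it is equivalent to the sketch below; the explicit Cython loop avoids the temporary array that fancy indexing allocates:

    import numpy as np

    def mask_fill_sketch(out, offset, mask, vals):
        # Equivalent semantics: copy vals[mask] (C-order traversal, matching
        # the triple loop) into out starting at offset; return the count.
        selected = vals[mask.astype(bool)]
        out[offset:offset + selected.size] = selected
        return selected.size

    vals = np.arange(8, dtype="float64").reshape(2, 2, 2)
    mask = vals % 2 == 0
    out = np.empty(4, dtype="float64")
    n = mask_fill_sketch(out, 0, mask, vals)   # n == 4; out == [0., 2., 4., 6.]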


https://bitbucket.org/yt_analysis/yt-3.0/changeset/b6911a8b76f4/
changeset:   b6911a8b76f4
branch:      yt-3.0
user:        MatthewTurk
date:        2012-11-12 03:52:29
summary:     We need to implement fill_mask to get GridSelectors to work for things like the
container fields.
affected #:  1 file

diff -r 2ac10177cbf6618c84965a84e00f1c7b829ea0f9 -r b6911a8b76f452b84bb4bc0ce6155babb88129ff yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -997,7 +997,7 @@
     @cython.wraparound(False)
     @cython.cdivision(True)
     def fill_mask(self, gobj):
-        return None
+        return np.ones(gobj.ActiveDimensions, dtype='bool')
 
     @cython.boundscheck(False)
     @cython.wraparound(False)



https://bitbucket.org/yt_analysis/yt-3.0/changeset/2c1073f374f8/
changeset:   2c1073f374f8
branch:      yt-3.0
user:        MatthewTurk
date:        2012-11-19 14:12:26
summary:     Using a fused type for mask_fill to get it to work with 32-bit data and fixing
ImageArray breakages that accessed field_info directly.
affected #:  3 files

diff -r b6911a8b76f452b84bb4bc0ce6155babb88129ff -r 2c1073f374f88915370e9354472e3c4d75253bd2 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -46,6 +46,10 @@
     long int lrint(double x) nogil
     double fabs(double x) nogil
 
+ctypedef fused anyfloat:
+    np.float32_t
+    np.float64_t
+
 # These routines are separated into a couple different categories:
 #
 #   * Routines for identifying intersections of an object with a bounding box
@@ -85,10 +89,10 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-def mask_fill(np.ndarray[np.float64_t, ndim=1] out,
-              np.int64_t offset,
-              np.ndarray[np.uint8_t, ndim=3, cast=True] mask,
-              np.ndarray[np.float64_t, ndim=3] vals):
+cdef _mask_fill(np.ndarray[np.float64_t, ndim=1] out,
+                np.int64_t offset,
+                np.ndarray[np.uint8_t, ndim=3, cast=True] mask,
+                np.ndarray[anyfloat, ndim=3] vals):
     cdef np.int64_t count = 0
     cdef int i, j, k
     for i in range(mask.shape[0]):
@@ -99,6 +103,17 @@
                     count += 1
     return count
 
+def mask_fill(np.ndarray[np.float64_t, ndim=1] out,
+              np.int64_t offset,
+              np.ndarray[np.uint8_t, ndim=3, cast=True] mask,
+              np.ndarray vals):
+    if vals.dtype == np.float32:
+        return _mask_fill[np.float32_t](out, offset, mask, vals)
+    elif vals.dtype == np.float64:
+        return _mask_fill[np.float64_t](out, offset, mask, vals)
+    else:
+        raise RuntimeError
+
 # Inclined Box
 
 cdef class SelectorObject:

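The anyfloat fused type compiles the copy loop once per floating dtype, and the thin mask_fill wrapper dispatches on vals.dtype. The shape of that dispatch, sketched in plain Python (where a single body serves both dtypes):

    import numpy as np

    def _mask_fill_impl(out, offset, mask, vals):
        # In the Cython version this body is instantiated once per fused
        # dtype (np.float32_t and np.float64_t).
        count = 0
        for i in range(mask.shape[0]):
            for j in range(mask.shape[1]):
                for k in range(mask.shape[2]):
                    if mask[i, j, k]:
                        out[offset + count] = vals[i, j, k]
                        count += 1
        return count

    def mask_fill(out, offset, mask, vals):
        # Mirror of the diff's dtype dispatch onto the specializations.
        if vals.dtype not in (np.float32, np.float64):
            raise RuntimeError("unsupported dtype: %s" % vals.dtype)
        return _mask_fill_impl(out, offset, mask, vals)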

diff -r b6911a8b76f452b84bb4bc0ce6155babb88129ff -r 2c1073f374f88915370e9354472e3c4d75253bd2 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -152,15 +152,15 @@
 
     def _get_info(self, item):
         info = {}
+        finfo = self.data_source.pf._get_field_info(*item)
         info['data_source'] = self.data_source.__str__()  
         info['axis'] = self.data_source.axis
         info['field'] = str(item)
-        info['units'] = self.data_source.pf.field_info[item].get_units()
+        info['units'] = finfo.get_units()
         info['xlim'] = self.bounds[:2]
         info['ylim'] = self.bounds[2:]
         info['length_to_cm'] = self.data_source.pf['cm']
-        info['projected_units'] = \
-                self.data_source.pf.field_info[item].get_projected_units()
+        info['projected_units'] = finfo.get_projected_units()
         info['center'] = self.data_source.center
         try:
             info['coord'] = self.data_source.coord


diff -r b6911a8b76f452b84bb4bc0ce6155babb88129ff -r 2c1073f374f88915370e9354472e3c4d75253bd2 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -518,7 +518,7 @@
         self._colormaps = defaultdict(lambda: 'algae')
         self.setup_callbacks()
         for field in self._frb.data.keys():
-            finfo = self.data_source._get_field_info(*field)
+            finfo = self.data_source.pf._get_field_info(*field)
             if finfo.take_log:
                 self._field_transform[field] = log_transform
             else:
@@ -709,7 +709,7 @@
         ds = self._frb.data_source
         pf = self.pf
         field = self.data_source._determine_fields(field)[0]
-        finfo = self.data_source._get_field_info(*field)
+        finfo = self.data_source.pf._get_field_info(*field)
         if ds._type_name in ("slice", "cutting"):
             units = finfo.get_units()
         elif ds._type_name == "proj" and (ds.weight_field is not None or 
@@ -848,7 +848,7 @@
             self.plots[f].axes.set_ylabel(labels[1])
 
             ftype, fname = f
-            field_name = self.data_source._get_field_info(ftype, fname).display_name
+            field_name = self.data_source.pf._get_field_info(ftype, fname).display_name
 
             if field_name is None:
                 field_name = r'$\rm{'+fname+r'}$'
@@ -903,7 +903,7 @@
         if field == 'all':
             fields = self.plots.keys()
         else:
-            fields = [field]
+            fields = self.data_source._determine_fields([field])
 
         for field in fields:
             self._colorbar_valid = False
@@ -1518,7 +1518,7 @@
         field = self.data_source._determine_fields(field)[0]
         self._current_field = field
         self._frb[field]
-        finfo = self.data_source._get_field_info(*field)
+        finfo = self.data_source.pf._get_field_info(*field)
         if finfo.take_log:
             self._field_transform[field] = log_transform
         else:



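The new wrapper's dispatch can be mimicked in pure Python to show the
contract: float32 and float64 arrays are both accepted (the Cython build
routes them to fused-type specializations of _mask_fill), and any other
dtype raises RuntimeError. A hypothetical sketch, not the compiled code:

    import numpy as np

    def mask_fill_dispatch(out, offset, mask, vals):
        # Mirrors the dtype check in the new mask_fill wrapper.
        if vals.dtype in (np.float32, np.float64):
            selected = vals[mask.astype(bool)].astype("float64")
            out[offset:offset + selected.size] = selected
            return selected.size
        raise RuntimeError("mask_fill requires float32 or float64 data")

    out = np.zeros(4, dtype="float64")
    vals32 = np.ones((2, 2, 1), dtype="float32")
    mask = np.ones((2, 2, 1), dtype="uint8")
    print(mask_fill_dispatch(out, 0, mask, vals32))  # 4
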
https://bitbucket.org/yt_analysis/yt-3.0/changeset/5b89c5ee1aff/
changeset:   5b89c5ee1aff
branch:      yt
user:        MatthewTurk
date:        2012-11-05 21:31:37
summary:     The to_pw method was not setting self.pf; I got confused in the class
hierarchy last week when I made this change.
affected #:  1 file

diff -r 8e2150167715b63ab413670576ea18a08e9f72bd -r 5b89c5ee1aff0971bde76a52410df526dcb6dde0 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -243,6 +243,10 @@
             rendering is used during data deposition.
 
         """
+        if not hasattr(self, "pf"):
+            self.pf = data_source.pf
+            ts = self._initialize_dataset(self.pf) 
+        self.ts = ts
         self._initfinished = False
         self.center = None
         self.plots = {}



https://bitbucket.org/yt_analysis/yt-3.0/changeset/71f6d234a15c/
changeset:   71f6d234a15c
branch:      yt
user:        MatthewTurk
date:        2012-11-07 13:15:59
summary:     Fixing undefined variable
affected #:  1 file

diff -r 5b89c5ee1aff0971bde76a52410df526dcb6dde0 -r 71f6d234a15c686aef3c04b6967d3e095fe373d3 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -246,7 +246,7 @@
         if not hasattr(self, "pf"):
             self.pf = data_source.pf
             ts = self._initialize_dataset(self.pf) 
-        self.ts = ts
+            self.ts = ts
         self._initfinished = False
         self.center = None
         self.plots = {}



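To spell out the failure mode fixed here: since ts is assigned only inside
the if branch, any path where self.pf already exists reaches the trailing
self.ts = ts with ts unbound. A self-contained reproduction sketch (names
hypothetical):

    class Holder(object):
        pass

    def buggy_init(obj, pf):
        if not hasattr(obj, "pf"):
            obj.pf = pf
            ts = [pf]
        obj.ts = ts  # UnboundLocalError when the branch is skipped

    h = Holder()
    h.pf = "already set"
    try:
        buggy_init(h, "some_pf")
    except UnboundLocalError as err:
        print(err)
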
https://bitbucket.org/yt_analysis/yt-3.0/changeset/7f4089b10365/
changeset:   7f4089b10365
branch:      yt
user:        MatthewTurk
date:        2012-11-07 20:41:06
summary:     Adding the ability to pass in pf_fn as a pf, and a requires_outputlog
decorator.
affected #:  1 file

diff -r 71f6d234a15c686aef3c04b6967d3e095fe373d3 -r 7f4089b10365fec94474e06b07a98892298f3233 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -132,6 +132,8 @@
 
 def can_run_pf(pf_fn):
     path = ytcfg.get("yt", "test_data_dir")
+    if isinstance(pf_fn, StaticOutput):
+        return AnswerTestingTest.result_storage is not None
     with temp_cwd(path):
         try:
             load(pf_fn)
@@ -141,6 +143,7 @@
 
 def data_dir_load(pf_fn):
     path = ytcfg.get("yt", "test_data_dir")
+    if isinstance(pf_fn, StaticOutput): return pf
     with temp_cwd(path):
         pf = load(pf_fn)
         pf.h
@@ -353,6 +356,15 @@
         for newc, oldc in zip(new_result["children"], old_result["children"]):
             assert(newp == oldp)
 
+def requires_outputlog():
+    def ffalse(func):
+        return lambda: None
+    def ftrue(func):
+        return func
+    if os.path.exists("OutputLog"):
+        return ftrue
+    return ffalse
+
 def requires_pf(pf_fn, big_data = False):
     def ffalse(func):
         return lambda: None



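A minimal usage sketch for the new decorator (the test body is
hypothetical): requires_outputlog() returns a pass-through when ./OutputLog
exists and otherwise replaces the test with a no-op, so suites without
Enzo's OutputLog skip cleanly rather than fail.

    from yt.utilities.answer_testing.framework import requires_outputlog

    @requires_outputlog()
    def test_enzo_outputs():
        # assertions that assume OutputLog is present would go here
        pass
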
https://bitbucket.org/yt_analysis/yt-3.0/changeset/d5ffc19d569f/
changeset:   d5ffc19d569f
branch:      yt
user:        MatthewTurk
date:        2012-11-07 20:52:19
summary:     Fixing missing StaticOutput in testing framework
affected #:  1 file

diff -r 7f4089b10365fec94474e06b07a98892298f3233 -r d5ffc19d569f4bc2ad5ec478b65f2569fa94b349 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -34,6 +34,7 @@
 from yt.testing import *
 from yt.config import ytcfg
 from yt.mods import *
+from yt.data_objects.static_output import StaticOutput
 import cPickle
 
 from yt.utilities.logger import disable_stream_logging



https://bitbucket.org/yt_analysis/yt-3.0/changeset/97cad0ed7f04/
changeset:   97cad0ed7f04
branch:      yt
user:        brittonsmith
date:        2012-11-07 21:20:31
summary:     Adding VerifySimulationSameTest.
affected #:  1 file

diff -r 7f4089b10365fec94474e06b07a98892298f3233 -r 97cad0ed7f04233852f8509f9b43bc923f861e51 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -315,6 +315,22 @@
         for k in new_result:
             assert_equal(new_result[k], old_result[k])
 
+class VerifySimulationSameTest(AnswerTestingTest):
+    _type_name = "VerifySimulationSame"
+    _attrs = ()
+
+    def __init__(self, simulation_obj):
+        self.pf = simulation_obj
+
+    def run(self):
+        result = [ds.current_time for ds in self.pf]
+        return result
+
+    def compare(self, new_result, old_result):
+        assert_equal(len(new_result), len(old_result))
+        for i in range(len(new_result)):
+            assert_equal(new_result[i], old_result[i])
+        
 class GridHierarchyTest(AnswerTestingTest):
     _type_name = "GridHierarchy"
     _attrs = ()



https://bitbucket.org/yt_analysis/yt-3.0/changeset/353dd610ecef/
changeset:   353dd610ecef
branch:      yt
user:        brittonsmith
date:        2012-11-07 21:20:44
summary:     Merged.
affected #:  1 file

diff -r 97cad0ed7f04233852f8509f9b43bc923f861e51 -r 353dd610ecef29427f9c333c7e2581bd0ffcfeb9 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -34,6 +34,7 @@
 from yt.testing import *
 from yt.config import ytcfg
 from yt.mods import *
+from yt.data_objects.static_output import StaticOutput
 import cPickle
 
 from yt.utilities.logger import disable_stream_logging



https://bitbucket.org/yt_analysis/yt-3.0/changeset/41c8bf82c39f/
changeset:   41c8bf82c39f
branch:      yt
user:        MatthewTurk
date:        2012-11-07 21:28:54
summary:     Adding sim_dir_load and an optional path argument to requires_outputlog
affected #:  1 file

diff -r 353dd610ecef29427f9c333c7e2581bd0ffcfeb9 -r 41c8bf82c39fdb3bd150823ae52fec95d9fa2fbe yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -150,6 +150,14 @@
         pf.h
         return pf
 
+def sim_dir_load(sim_fn, path = None, sim_type = "Enzo"):
+    if path is None and not os.path.exists(sim_fn):
+        raise IOError
+    if os.path.exists(sim_fn) or not path:
+        path = "."
+    with temp_cwd(path):
+        return Simulation(sim_fn, sim_type = sim_type)
+
 class AnswerTestingTest(object):
     reference_storage = None
     def __init__(self, pf_fn):
@@ -373,13 +381,16 @@
         for newc, oldc in zip(new_result["children"], old_result["children"]):
             assert(newp == oldp)
 
-def requires_outputlog():
+def requires_outputlog(path = "."):
     def ffalse(func):
         return lambda: None
     def ftrue(func):
         return func
     if os.path.exists("OutputLog"):
         return ftrue
+    with temp_cwd(path):
+        if os.path.exists("OutputLog"):
+            return ftrue
     return ffalse
 
 def requires_pf(pf_fn, big_data = False):



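A hypothetical usage sketch for sim_dir_load (both the parameter filename
and the path are illustrative): with no path, sim_fn must already exist
relative to the current directory; otherwise the simulation is loaded from
inside temp_cwd(path).

    from yt.utilities.answer_testing.framework import sim_dir_load

    sim = sim_dir_load("AMRCosmology.enzo",
                       path="enzo_cosmology_plus",
                       sim_type="Enzo")
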
https://bitbucket.org/yt_analysis/yt-3.0/changeset/e936dc471751/
changeset:   e936dc471751
branch:      yt
user:        ngoldbaum
date:        2012-11-07 21:39:29
summary:     Fixing the docstring in the timestamp callback.
affected #:  1 file

diff -r 41c8bf82c39fdb3bd150823ae52fec95d9fa2fbe -r e936dc471751440fb68c417a6bb97d56dc3fad8e yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -1170,10 +1170,11 @@
           }
     _bbox_dict = {'boxstyle': 'square,pad=0.6', 'fc': 'white', 'ec': 'black', 'alpha': 1.0}
 
-    def __init__(self, x, y, units=None, format="{time:.3G} {units}", normalized = False, 
-                 bbox_dict = None, **kwargs):
+    def __init__(self, x, y, units=None, format="{time:.3G} {units}", normalized=False, 
+                 bbox_dict=None, **kwargs):
         """ 
-        annotate_timestamp(x, y, units=None, format="{time:.3G} {units}", **kwargs)
+        annotate_timestamp(x, y, units=None, format="{time:.3G} {units}",
+                           normalized=False, bbox_dict=None, **kwargs)
 
         Adds the current time to the plot at point given by *x* and *y*.  If *units* 
         is given ('s', 'ms', 'ns', etc), it will convert the time to this basis.  If 



https://bitbucket.org/yt_analysis/yt-3.0/changeset/72c09b97942f/
changeset:   72c09b97942f
branch:      yt
user:        MatthewTurk
date:        2012-11-07 21:50:14
summary:     Fixing unbound local
affected #:  1 file

diff -r 41c8bf82c39fdb3bd150823ae52fec95d9fa2fbe -r 72c09b97942f4764541f8ab5d64e5b86ad325680 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -144,7 +144,7 @@
 
 def data_dir_load(pf_fn):
     path = ytcfg.get("yt", "test_data_dir")
-    if isinstance(pf_fn, StaticOutput): return pf
+    if isinstance(pf_fn, StaticOutput): return pf_fn
     with temp_cwd(path):
         pf = load(pf_fn)
         pf.h



https://bitbucket.org/yt_analysis/yt-3.0/changeset/fc5e758cc8d0/
changeset:   fc5e758cc8d0
branch:      yt
user:        MatthewTurk
date:        2012-11-07 21:50:31
summary:     Merge
affected #:  1 file

diff -r 72c09b97942f4764541f8ab5d64e5b86ad325680 -r fc5e758cc8d0499e7e4006fe187720cc342b06bf yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -1170,10 +1170,11 @@
           }
     _bbox_dict = {'boxstyle': 'square,pad=0.6', 'fc': 'white', 'ec': 'black', 'alpha': 1.0}
 
-    def __init__(self, x, y, units=None, format="{time:.3G} {units}", normalized = False, 
-                 bbox_dict = None, **kwargs):
+    def __init__(self, x, y, units=None, format="{time:.3G} {units}", normalized=False, 
+                 bbox_dict=None, **kwargs):
         """ 
-        annotate_timestamp(x, y, units=None, format="{time:.3G} {units}", **kwargs)
+        annotate_timestamp(x, y, units=None, format="{time:.3G} {units}",
+                           normalized=False, bbox_dict=None, **kwargs)
 
         Adds the current time to the plot at point given by *x* and *y*.  If *units* 
         is given ('s', 'ms', 'ns', etc), it will convert the time to this basis.  If 



https://bitbucket.org/yt_analysis/yt-3.0/changeset/2279e0e8385d/
changeset:   2279e0e8385d
branch:      yt
user:        MatthewTurk
date:        2012-11-07 21:53:25
summary:     Adding the ability to skip answer comparison
affected #:  1 file

diff -r fc5e758cc8d0499e7e4006fe187720cc342b06bf -r 2279e0e8385d7c76fa12d9b6129869fa3ddb63aa yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -76,6 +76,8 @@
         ytcfg["yt","__withintesting"] = "True"
         AnswerTestingTest.result_storage = \
             self.result_storage = defaultdict(dict)
+        if options.compare_name == "SKIP":
+            options.compare_name = None
         if options.compare_name is not None:
             # Now we grab from our S3 store
             if options.compare_name == "latest":



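The skip logic reduces to a small normalization step: the literal comparison
name "SKIP" is mapped to None before the storage lookup, so results are
still generated and stored but never checked against an old answer. A sketch
with a stand-in options object:

    class Options(object):
        compare_name = "SKIP"  # stand-in for the parsed plugin options

    if Options.compare_name == "SKIP":
        Options.compare_name = None  # disables the comparison branch
    assert Options.compare_name is None
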
https://bitbucket.org/yt_analysis/yt-3.0/changeset/fabc584792ca/
changeset:   fabc584792ca
branch:      yt
user:        brittonsmith
date:        2012-11-07 21:50:33
summary:     Fixing small bugs.
affected #:  2 files

diff -r e936dc471751440fb68c417a6bb97d56dc3fad8e -r fabc584792cae260e905755b7310d7fa27cc1777 yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -478,7 +478,7 @@
         self.parameters['TopGridRank'] = 3
         self.parameters['DomainLeftEdge'] = np.zeros(self.parameters['TopGridRank'])
         self.parameters['DomainRightEdge'] = np.ones(self.parameters['TopGridRank'])
-        self.parameters['Refineby'] = 2 # technically not the enzo default
+        self.parameters['RefineBy'] = 2 # technically not the enzo default
         self.parameters['StopCycle'] = 100000
         self.parameters['dtDataDump'] = 0.
         self.parameters['CycleSkipDataDump'] = 0.


diff -r e936dc471751440fb68c417a6bb97d56dc3fad8e -r fabc584792cae260e905755b7310d7fa27cc1777 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -156,7 +156,7 @@
     if os.path.exists(sim_fn) or not path:
         path = "."
     with temp_cwd(path):
-        return Simulation(sim_fn, sim_type = sim_type)
+        return simulation(sim_fn, sim_type)
 
 class AnswerTestingTest(object):
     reference_storage = None



https://bitbucket.org/yt_analysis/yt-3.0/changeset/a50c508eecf6/
changeset:   a50c508eecf6
branch:      yt
user:        brittonsmith
date:        2012-11-07 21:50:54
summary:     Merged.
affected #:  1 file

diff -r fabc584792cae260e905755b7310d7fa27cc1777 -r a50c508eecf615eee7045a2fbd8d341ef7fd12d6 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -144,7 +144,7 @@
 
 def data_dir_load(pf_fn):
     path = ytcfg.get("yt", "test_data_dir")
-    if isinstance(pf_fn, StaticOutput): return pf
+    if isinstance(pf_fn, StaticOutput): return pf_fn
     with temp_cwd(path):
         pf = load(pf_fn)
         pf.h



https://bitbucket.org/yt_analysis/yt-3.0/changeset/52d3a540a8cf/
changeset:   52d3a540a8cf
branch:      yt
user:        brittonsmith
date:        2012-11-07 22:58:00
summary:     Merged.
affected #:  1 file

diff -r a50c508eecf615eee7045a2fbd8d341ef7fd12d6 -r 52d3a540a8cf363189cd075d4cae16ab311bcec7 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -76,6 +76,8 @@
         ytcfg["yt","__withintesting"] = "True"
         AnswerTestingTest.result_storage = \
             self.result_storage = defaultdict(dict)
+        if options.compare_name == "SKIP":
+            options.compare_name = None
         if options.compare_name is not None:
             # Now we grab from our S3 store
             if options.compare_name == "latest":



https://bitbucket.org/yt_analysis/yt-3.0/changeset/9423fbdc1ca7/
changeset:   9423fbdc1ca7
branch:      yt
user:        MatthewTurk
date:        2012-11-07 23:06:32
summary:     Adding a prefix for result storage
affected #:  1 file

diff -r 52d3a540a8cf363189cd075d4cae16ab311bcec7 -r 9423fbdc1ca7bcc023a66b68fd912a852c6427ff yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -162,19 +162,24 @@
 
 class AnswerTestingTest(object):
     reference_storage = None
+    prefix = ""
     def __init__(self, pf_fn):
         self.pf = data_dir_load(pf_fn)
 
     def __call__(self):
         nv = self.run()
         if self.reference_storage is not None:
-            dd = self.reference_storage.get(str(self.pf))
+            dd = self.reference_storage.get(self.storage_name)
             if dd is None: raise YTNoOldAnswer()
             ov = dd[self.description]
             self.compare(nv, ov)
         else:
             ov = None
-        self.result_storage[str(self.pf)][self.description] = nv
+        self.result_storage[self.storage_name][self.description] = nv
+
+    @property
+    def storage_name(self):
+        return "%s_%s" % (self.prefix, self.pf)
 
     def compare(self, new_result, old_result):
         raise RuntimeError
@@ -383,11 +388,16 @@
         for newc, oldc in zip(new_result["children"], old_result["children"]):
             assert(newp == oldp)
 
-def requires_outputlog(path = "."):
+def requires_outputlog(path = ".", prefix = ""):
     def ffalse(func):
         return lambda: None
     def ftrue(func):
-        return func
+        def fyielder(*args, **kwargs):
+            for t in func(*args, **kwargs):
+                if isinstance(t, AnswerTestingTest):
+                    t.prefix = prefix
+                yield t
+        return fyielder
     if os.path.exists("OutputLog"):
         return ftrue
     with temp_cwd(path):



https://bitbucket.org/yt_analysis/yt-3.0/changeset/350b37e4bf20/
changeset:   350b37e4bf20
branch:      yt
user:        brittonsmith
date:        2012-11-07 23:24:05
summary:     Matt's fix for a bug: apply @wraps to the fyielder wrapper.
affected #:  1 file

diff -r 9423fbdc1ca7bcc023a66b68fd912a852c6427ff -r 350b37e4bf205ccf97496c5563119bbcf36fe3a3 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -392,6 +392,7 @@
     def ffalse(func):
         return lambda: None
     def ftrue(func):
+        @wraps(func)
         def fyielder(*args, **kwargs):
             for t in func(*args, **kwargs):
                 if isinstance(t, AnswerTestingTest):



https://bitbucket.org/yt_analysis/yt-3.0/changeset/93824c3b33ea/
changeset:   93824c3b33ea
branch:      yt
user:        MatthewTurk
date:        2012-11-07 23:35:14
summary:     Simple hotfix for prefixing storage names
affected #:  1 file

diff -r 9423fbdc1ca7bcc023a66b68fd912a852c6427ff -r 93824c3b33ea1a1630891598afb6ee0f455bf1c4 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -179,7 +179,9 @@
 
     @property
     def storage_name(self):
-        return "%s_%s" % (self.prefix, self.pf)
+        if self.prefix != "":
+            return "%s_%s" % (self.prefix, self.pf)
+        return str(self.pf)
 
     def compare(self, new_result, old_result):
         raise RuntimeError



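After the hotfix, the keying rule for stored answers is simple: prefixed
names only when a prefix was actually supplied. A sketch of the rule in
isolation:

    def storage_name(prefix, pf_name):
        if prefix != "":
            return "%s_%s" % (prefix, pf_name)
        return pf_name

    assert storage_name("", "DD0010") == "DD0010"
    assert storage_name("cosmo", "DD0010") == "cosmo_DD0010"
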
https://bitbucket.org/yt_analysis/yt-3.0/changeset/165822e82706/
changeset:   165822e82706
branch:      yt
user:        MatthewTurk
date:        2012-11-07 23:35:34
summary:     Merging
affected #:  1 file

diff -r 93824c3b33ea1a1630891598afb6ee0f455bf1c4 -r 165822e82706aad84cb5bd658100c501823099ca yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -394,6 +394,7 @@
     def ffalse(func):
         return lambda: None
     def ftrue(func):
+        @wraps(func)
         def fyielder(*args, **kwargs):
             for t in func(*args, **kwargs):
                 if isinstance(t, AnswerTestingTest):



https://bitbucket.org/yt_analysis/yt-3.0/changeset/63f30562adb4/
changeset:   63f30562adb4
branch:      yt
user:        sskory
date:        2012-11-07 23:54:13
summary:     Making the hop circle callback a bit prettier.
affected #:  1 file

diff -r a9f28dd0dcb90d1d75ec39844c3d36ecae989ee5 -r 63f30562adb4bd81f827ee5bfa72953ab3163234 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -809,6 +809,7 @@
 
     def __call__(self, plot):
         from matplotlib.patches import Circle
+        num = len(self.hop_output[:self.max_number])
         for halo in self.hop_output[:self.max_number]:
             size = halo.get_size()
             if size < self.min_size or size > self.max_size: continue
@@ -825,18 +826,19 @@
             (xi, yi) = (x_dict[plot.data.axis], y_dict[plot.data.axis])
 
             (center_x,center_y) = self.convert_to_plot(plot,(center[xi], center[yi]))
-            cir = Circle((center_x, center_y), radius, fill=False)
+            color = np.ones(3) * (0.3 * (num - halo.id)/ num) + 0.6
+            cir = Circle((center_x, center_y), radius, fill=False, color=color)
             plot._axes.add_patch(cir)
             if self.annotate:
                 if self.print_halo_size:
-                    plot._axes.text(center_x, center_y, "%s" % size,
-                    fontsize=self.font_size)
+                    plot._axes.text(center_x+radius, center_y+radius, "%s" % size,
+                    fontsize=self.font_size, color=color)
                 elif self.print_halo_mass:
-                    plot._axes.text(center_x, center_y, "%s" % halo.total_mass(),
-                    fontsize=self.font_size)
+                    plot._axes.text(center_x+radius, center_y+radius, "%s" % halo.total_mass(),
+                    fontsize=self.font_size, color=color)
                 else:
-                    plot._axes.text(center_x, center_y, "%s" % halo.id,
-                    fontsize=self.font_size)
+                    plot._axes.text(center_x+radius, center_y+radius, "%s" % halo.id,
+                    fontsize=self.font_size, color=color)
 
 class HopParticleCallback(PlotCallback):
     _type_name = "hop_particles"



https://bitbucket.org/yt_analysis/yt-3.0/changeset/f15825659f5a/
changeset:   f15825659f5a
branch:      yt
user:        sskory
date:        2012-11-08 00:03:20
summary:     Small mistake.
affected #:  1 file

diff -r 63f30562adb4bd81f827ee5bfa72953ab3163234 -r f15825659f5af3ce64aaad30062aff3603cbfb66 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -826,7 +826,7 @@
             (xi, yi) = (x_dict[plot.data.axis], y_dict[plot.data.axis])
 
             (center_x,center_y) = self.convert_to_plot(plot,(center[xi], center[yi]))
-            color = np.ones(3) * (0.3 * (num - halo.id)/ num) + 0.6
+            color = np.ones(3) * (0.4 * (num - halo.id)/ num) + 0.6
             cir = Circle((center_x, center_y), radius, fill=False, color=color)
             plot._axes.add_patch(cir)
             if self.annotate:



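The color expression above builds a grayscale ramp: halo 0 (the first and
typically largest) is drawn pure white, and later halos fade toward
0.6-gray, which is what makes the circles easier to tell apart. A standalone
sketch of the ramp:

    import numpy as np

    num = 5
    for halo_id in range(num):
        color = np.ones(3) * (0.4 * (num - halo_id) / num) + 0.6
        print(halo_id, color)  # from [1. 1. 1.] down to [0.68 0.68 0.68]
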
https://bitbucket.org/yt_analysis/yt-3.0/changeset/e56349a44a49/
changeset:   e56349a44a49
branch:      yt
user:        MatthewTurk
date:        2012-11-08 02:37:08
summary:     Merged in sskory/yt (pull request #329)
affected #:  1 file

diff -r 165822e82706aad84cb5bd658100c501823099ca -r e56349a44a49cec1a7c3af00300dd3daa566f964 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -809,6 +809,7 @@
 
     def __call__(self, plot):
         from matplotlib.patches import Circle
+        num = len(self.hop_output[:self.max_number])
         for halo in self.hop_output[:self.max_number]:
             size = halo.get_size()
             if size < self.min_size or size > self.max_size: continue
@@ -825,18 +826,19 @@
             (xi, yi) = (x_dict[plot.data.axis], y_dict[plot.data.axis])
 
             (center_x,center_y) = self.convert_to_plot(plot,(center[xi], center[yi]))
-            cir = Circle((center_x, center_y), radius, fill=False)
+            color = np.ones(3) * (0.4 * (num - halo.id)/ num) + 0.6
+            cir = Circle((center_x, center_y), radius, fill=False, color=color)
             plot._axes.add_patch(cir)
             if self.annotate:
                 if self.print_halo_size:
-                    plot._axes.text(center_x, center_y, "%s" % size,
-                    fontsize=self.font_size)
+                    plot._axes.text(center_x+radius, center_y+radius, "%s" % size,
+                    fontsize=self.font_size, color=color)
                 elif self.print_halo_mass:
-                    plot._axes.text(center_x, center_y, "%s" % halo.total_mass(),
-                    fontsize=self.font_size)
+                    plot._axes.text(center_x+radius, center_y+radius, "%s" % halo.total_mass(),
+                    fontsize=self.font_size, color=color)
                 else:
-                    plot._axes.text(center_x, center_y, "%s" % halo.id,
-                    fontsize=self.font_size)
+                    plot._axes.text(center_x+radius, center_y+radius, "%s" % halo.id,
+                    fontsize=self.font_size, color=color)
 
 class HopParticleCallback(PlotCallback):
     _type_name = "hop_particles"



https://bitbucket.org/yt_analysis/yt-3.0/changeset/f30c374527b4/
changeset:   f30c374527b4
branch:      yt
user:        brittonsmith
date:        2012-11-08 03:19:56
summary:     Adding find_outputs keyword to sim_dir_load.
affected #:  1 file

diff -r 52d3a540a8cf363189cd075d4cae16ab311bcec7 -r f30c374527b4591e233cc10fd3dabb7718fb3ec0 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -152,13 +152,15 @@
         pf.h
         return pf
 
-def sim_dir_load(sim_fn, path = None, sim_type = "Enzo"):
+def sim_dir_load(sim_fn, path = None, sim_type = "Enzo",
+                 find_outputs=False):
     if path is None and not os.path.exists(sim_fn):
         raise IOError
     if os.path.exists(sim_fn) or not path:
         path = "."
     with temp_cwd(path):
-        return simulation(sim_fn, sim_type)
+        return simulation(sim_fn, sim_type,
+                          find_outputs=find_outputs)
 
 class AnswerTestingTest(object):
     reference_storage = None



https://bitbucket.org/yt_analysis/yt-3.0/changeset/92c62c32ab33/
changeset:   92c62c32ab33
branch:      yt
user:        brittonsmith
date:        2012-11-08 03:20:15
summary:     Merged.
affected #:  2 files

diff -r f30c374527b4591e233cc10fd3dabb7718fb3ec0 -r 92c62c32ab3388ef40f7b71e0f936947528755a0 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -164,19 +164,26 @@
 
 class AnswerTestingTest(object):
     reference_storage = None
+    prefix = ""
     def __init__(self, pf_fn):
         self.pf = data_dir_load(pf_fn)
 
     def __call__(self):
         nv = self.run()
         if self.reference_storage is not None:
-            dd = self.reference_storage.get(str(self.pf))
+            dd = self.reference_storage.get(self.storage_name)
             if dd is None: raise YTNoOldAnswer()
             ov = dd[self.description]
             self.compare(nv, ov)
         else:
             ov = None
-        self.result_storage[str(self.pf)][self.description] = nv
+        self.result_storage[self.storage_name][self.description] = nv
+
+    @property
+    def storage_name(self):
+        if self.prefix != "":
+            return "%s_%s" % (self.prefix, self.pf)
+        return str(self.pf)
 
     def compare(self, new_result, old_result):
         raise RuntimeError
@@ -385,11 +392,17 @@
         for newc, oldc in zip(new_result["children"], old_result["children"]):
             assert(newp == oldp)
 
-def requires_outputlog(path = "."):
+def requires_outputlog(path = ".", prefix = ""):
     def ffalse(func):
         return lambda: None
     def ftrue(func):
-        return func
+        @wraps(func)
+        def fyielder(*args, **kwargs):
+            for t in func(*args, **kwargs):
+                if isinstance(t, AnswerTestingTest):
+                    t.prefix = prefix
+                yield t
+        return fyielder
     if os.path.exists("OutputLog"):
         return ftrue
     with temp_cwd(path):


diff -r f30c374527b4591e233cc10fd3dabb7718fb3ec0 -r 92c62c32ab3388ef40f7b71e0f936947528755a0 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -809,6 +809,7 @@
 
     def __call__(self, plot):
         from matplotlib.patches import Circle
+        num = len(self.hop_output[:self.max_number])
         for halo in self.hop_output[:self.max_number]:
             size = halo.get_size()
             if size < self.min_size or size > self.max_size: continue
@@ -825,18 +826,19 @@
             (xi, yi) = (x_dict[plot.data.axis], y_dict[plot.data.axis])
 
             (center_x,center_y) = self.convert_to_plot(plot,(center[xi], center[yi]))
-            cir = Circle((center_x, center_y), radius, fill=False)
+            color = np.ones(3) * (0.4 * (num - halo.id)/ num) + 0.6
+            cir = Circle((center_x, center_y), radius, fill=False, color=color)
             plot._axes.add_patch(cir)
             if self.annotate:
                 if self.print_halo_size:
-                    plot._axes.text(center_x, center_y, "%s" % size,
-                    fontsize=self.font_size)
+                    plot._axes.text(center_x+radius, center_y+radius, "%s" % size,
+                    fontsize=self.font_size, color=color)
                 elif self.print_halo_mass:
-                    plot._axes.text(center_x, center_y, "%s" % halo.total_mass(),
-                    fontsize=self.font_size)
+                    plot._axes.text(center_x+radius, center_y+radius, "%s" % halo.total_mass(),
+                    fontsize=self.font_size, color=color)
                 else:
-                    plot._axes.text(center_x, center_y, "%s" % halo.id,
-                    fontsize=self.font_size)
+                    plot._axes.text(center_x+radius, center_y+radius, "%s" % halo.id,
+                    fontsize=self.font_size, color=color)
 
 class HopParticleCallback(PlotCallback):
     _type_name = "hop_particles"



https://bitbucket.org/yt_analysis/yt-3.0/changeset/24c8d678fd5b/
changeset:   24c8d678fd5b
branch:      yt
user:        MatthewTurk
date:        2012-11-08 04:03:38
summary:     We need to switch into temp_cwd inside the test-yielding wrapper
affected #:  1 file

diff -r 92c62c32ab3388ef40f7b71e0f936947528755a0 -r 24c8d678fd5b78afa853034f45e4ddffdb782a71 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -398,10 +398,11 @@
     def ftrue(func):
         @wraps(func)
         def fyielder(*args, **kwargs):
-            for t in func(*args, **kwargs):
-                if isinstance(t, AnswerTestingTest):
-                    t.prefix = prefix
-                yield t
+            with temp_cwd(path):
+                for t in func(*args, **kwargs):
+                    if isinstance(t, AnswerTestingTest):
+                        t.prefix = prefix
+                    yield t
         return fyielder
     if os.path.exists("OutputLog"):
         return ftrue



https://bitbucket.org/yt_analysis/yt-3.0/changeset/1575fdb073cb/
changeset:   1575fdb073cb
branch:      yt
user:        samskillman
date:        2012-11-07 21:29:55
summary:     Some upgrades for the fixed resolution buffer and image array.
affected #:  1 file

diff -r 3972786cc04e9e66747a2f49196675e45ee66408 -r 1575fdb073cb3ca41e5fb424e0823623cf550df8 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -407,8 +407,9 @@
                                self.data_source[item],
                                self.buff_size[0], self.buff_size[1],
                                self.bounds).transpose()
-        self[item] = buff
-        return buff
+        ia = ImageArray(buff, info=self._get_info(item))
+        self[item] = ia
+        return ia 
 
 
 class OffAxisProjectionFixedResolutionBuffer(FixedResolutionBuffer):
@@ -430,7 +431,8 @@
                                    weight=ds.weight_field, volume=ds.volume,
                                    no_ghost=ds.no_ghost, interpolated=ds.interpolated,
                                    north_vector=ds.north_vector)
-        self[item] = buff.swapaxes(0,1)
-        return buff
+        ia = ImageArray(buff.swapaxes(0,1), info=self._get_info(item))
+        self[item] = ia
+        return ia 
 
 



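With this change, indexing a FixedResolutionBuffer hands back an ImageArray
(an ndarray subclass carrying the metadata assembled by _get_info) rather
than a bare array. A usage sketch, assuming a dataset already loaded as pf
with a "Density" field; the bounds and buffer size are illustrative:

    from yt.visualization.fixed_resolution import FixedResolutionBuffer

    slc = pf.h.slice(0, 0.5)
    frb = FixedResolutionBuffer(slc, (0.0, 1.0, 0.0, 1.0), (512, 512))
    dens = frb["Density"]      # an ImageArray now, not a bare ndarray
    print(dens.info["units"])  # metadata attached via _get_info
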
https://bitbucket.org/yt_analysis/yt-3.0/changeset/e5fabbeb8385/
changeset:   e5fabbeb8385
branch:      yt
user:        samskillman
date:        2012-11-08 05:38:42
summary:     Adding assert_allclose for testing.
affected #:  1 file

diff -r 1575fdb073cb3ca41e5fb424e0823623cf550df8 -r e5fabbeb838531c827baa5acf54e2b4aaed84785 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -26,7 +26,8 @@
 from yt.funcs import *
 from numpy.testing import assert_array_equal, assert_almost_equal, \
     assert_approx_equal, assert_array_almost_equal, assert_equal, \
-    assert_array_less, assert_string_equal, assert_array_almost_equal_nulp
+    assert_array_less, assert_string_equal, assert_array_almost_equal_nulp,\
+    assert_allclose
 
 def assert_rel_equal(a1, a2, decimals):
     # We have nan checks in here because occasionally we have fields that get



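assert_allclose here is NumPy's element-wise closeness assertion, now
re-exported so tests can pull it from yt.testing along with the other
assertion helpers. A minimal self-contained example:

    import numpy as np
    from numpy.testing import assert_allclose

    a = np.array([1.0, 2.0, 3.0])
    b = a * (1.0 + 1.0e-9)
    assert_allclose(b, a, rtol=1.0e-7)  # passes; relative error ~1e-9
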
https://bitbucket.org/yt_analysis/yt-3.0/changeset/ced3cc36250b/
changeset:   ced3cc36250b
branch:      yt
user:        samskillman
date:        2012-11-08 05:39:11
summary:     Merging
affected #:  9 files

diff -r e5fabbeb838531c827baa5acf54e2b4aaed84785 -r ced3cc36250bf0cf797ed7b42d56c39498ab0772 yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -478,7 +478,7 @@
         self.parameters['TopGridRank'] = 3
         self.parameters['DomainLeftEdge'] = np.zeros(self.parameters['TopGridRank'])
         self.parameters['DomainRightEdge'] = np.ones(self.parameters['TopGridRank'])
-        self.parameters['Refineby'] = 2 # technically not the enzo default
+        self.parameters['RefineBy'] = 2 # technically not the enzo default
         self.parameters['StopCycle'] = 100000
         self.parameters['dtDataDump'] = 0.
         self.parameters['CycleSkipDataDump'] = 0.


diff -r e5fabbeb838531c827baa5acf54e2b4aaed84785 -r ced3cc36250bf0cf797ed7b42d56c39498ab0772 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -99,8 +99,8 @@
         add_field(fn1.split("_")[0] + "_Density",
                   function=_get_density(fn1), take_log=True,
                   display_name="%s\/Density" % fn1.split("_")[0],
-                  units = r"\rm{g}/\rm{cm}^3",
-                  projected_units = r"\rm{g}/\rm{cm}^2",
+                  units = r"\rm{g}/\rm{cm}^{3}",
+                  projected_units = r"\rm{g}/\rm{cm}^{2}",
                   )
 
 def _get_convert(fname):
@@ -110,8 +110,8 @@
 
 add_flash_field("dens", function=NullFunc, take_log=True,
                 convert_function=_get_convert("dens"),
-                units=r"\rm{g}/\rm{cm}^3",
-                projected_units = r"\rm{g}/\rm{cm}^2"),
+                units=r"\rm{g}/\rm{cm}^{3}",
+                projected_units = r"\rm{g}/\rm{cm}^{2}"),
 add_flash_field("velx", function=NullFunc, take_log=False,
                 convert_function=_get_convert("velx"),
                 units=r"\rm{cm}/\rm{s}")
@@ -159,10 +159,10 @@
                 units = r"\rm{K}")
 add_flash_field("pres", function=NullFunc, take_log=True,
                 convert_function=_get_convert("pres"),
-                units=r"\rm{erg}\//\/\rm{cm}^{3}")
+                units=r"\rm{erg}/\rm{cm}^{3}")
 add_flash_field("pden", function=NullFunc, take_log=True,
                 convert_function=_get_convert("pden"),
-                units=r"\rm{g}/\rm{cm}^3")
+                units=r"\rm{g}/\rm{cm}^{3}")
 add_flash_field("magx", function=NullFunc, take_log=False,
                 convert_function=_get_convert("magx"),
                 units = r"\mathrm{Gau\ss}")
@@ -174,7 +174,7 @@
                 units = r"\mathrm{Gau\ss}")
 add_flash_field("magp", function=NullFunc, take_log=True,
                 convert_function=_get_convert("magp"),
-                units = r"\rm{erg}\//\/\rm{cm}^{3}")
+                units = r"\rm{erg}/\rm{cm}^{3}")
 add_flash_field("divb", function=NullFunc, take_log=False,
                 convert_function=_get_convert("divb"),
                 units = r"\mathrm{Gau\ss}\/\rm{cm}")
@@ -186,10 +186,10 @@
                 units=r"\rm{ratio\/of\/specific\/heats}")
 add_flash_field("gpot", function=NullFunc, take_log=False,
                 convert_function=_get_convert("gpot"),
-                units=r"\rm{ergs\//\/g}")
+                units=r"\rm{ergs}/\rm{g}")
 add_flash_field("gpol", function=NullFunc, take_log=False,
                 convert_function=_get_convert("gpol"),
-                units = r"\rm{ergs\//\/g}")
+                units = r"\rm{ergs}/\rm{g}")
 add_flash_field("flam", function=NullFunc, take_log=False,
                 convert_function=_get_convert("flam"))
 


diff -r e5fabbeb838531c827baa5acf54e2b4aaed84785 -r ced3cc36250bf0cf797ed7b42d56c39498ab0772 yt/frontends/stream/api.py
--- a/yt/frontends/stream/api.py
+++ b/yt/frontends/stream/api.py
@@ -29,7 +29,8 @@
       StreamHierarchy, \
       StreamStaticOutput, \
       StreamHandler, \
-      load_uniform_grid
+      load_uniform_grid, \
+      load_amr_grids
 
 from .fields import \
       KnownStreamFields, \


diff -r e5fabbeb838531c827baa5acf54e2b4aaed84785 -r ced3cc36250bf0cf797ed7b42d56c39498ab0772 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -395,3 +395,103 @@
     for unit in mpc_conversion.keys():
         spf.units[unit] = mpc_conversion[unit] * box_in_mpc
     return spf
+
+def load_amr_grids(grid_data, domain_dimensions, sim_unit_to_cm, bbox=None,
+                   sim_time=0.0, number_of_particles=0):
+    r"""Load a set of grids of data into yt as a
+    :class:`~yt.frontends.stream.data_structures.StreamHandler`.
+
+    This should allow a sequence of grids of varying resolution of data to be
+    loaded directly into yt and analyzed as would any others.  This comes with
+    several caveats:
+        * Units will be incorrect unless the data has already been converted to
+          cgs.
+        * Some functions may behave oddly, and parallelism will be
+          disappointing or non-existent in most cases.
+        * Particles may be difficult to integrate.
+        * No consistency checks are performed on the hierarchy
+
+    Parameters
+    ----------
+    grid_data : list of dicts
+        This is a list of dicts.  Each dict must have entries "left_edge",
+        "right_edge", "dimensions", "level", and then any remaining entries are
+        assumed to be fields.  This will be modified in place and can't be
+        assumed to be static.
+    domain_dimensions : array_like
+        This is the domain dimensions of the grid
+    sim_unit_to_cm : float
+        Conversion factor from simulation units to centimeters
+    bbox : array_like (xdim:zdim, LE:RE), optional
+        Size of computational domain in units sim_unit_to_cm
+    sim_time : float, optional
+        The simulation time in seconds
+    number_of_particles : int, optional
+        If particle fields are included, set this to the number of particles
+
+    Examples
+    --------
+
+    >>> grid_data = [
+    ...     dict(left_edge = [0.0, 0.0, 0.0],
+    ...          right_edge = [1.0, 1.0, 1.],
+    ...          level = 0,
+    ...          dimensions = [32, 32, 32]),
+    ...     dict(left_edge = [0.25, 0.25, 0.25],
+    ...          right_edge = [0.75, 0.75, 0.75],
+    ...          level = 1,
+    ...          dimensions = [32, 32, 32])
+    ... ]
+    ... 
+    >>> for g in grid_data:
+    ...     g["Density"] = np.random.random(g["dimensions"]) * 2**g["level"]
+    ...
+    >>> pf = load_amr_grids(grid_data, [32, 32, 32], 1.0)
+    """
+
+    domain_dimensions = np.array(domain_dimensions)
+    ngrids = len(grid_data)
+    if bbox is None:
+        bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], 'float64')
+    domain_left_edge = np.array(bbox[:, 0], 'float64')
+    domain_right_edge = np.array(bbox[:, 1], 'float64')
+    grid_levels = np.zeros((ngrids, 1), dtype='int32')
+    grid_left_edges = np.zeros((ngrids, 3), dtype="float32")
+    grid_right_edges = np.zeros((ngrids, 3), dtype="float32")
+    grid_dimensions = np.zeros((ngrids, 3), dtype="int32")
+    sfh = StreamDictFieldHandler()
+    for i, g in enumerate(grid_data):
+        grid_left_edges[i,:] = g.pop("left_edge")
+        grid_right_edges[i,:] = g.pop("right_edge")
+        grid_dimensions[i,:] = g.pop("dimensions")
+        grid_levels[i,:] = g.pop("level")
+        sfh[i] = g
+
+    handler = StreamHandler(
+        grid_left_edges,
+        grid_right_edges,
+        grid_dimensions,
+        grid_levels,
+        None, # parent_ids is none
+        number_of_particles*np.ones(ngrids, dtype='int64').reshape(ngrids,1),
+        np.zeros(ngrids).reshape((ngrids,1)),
+        sfh,
+    )
+
+    handler.name = "AMRGridData"
+    handler.domain_left_edge = domain_left_edge
+    handler.domain_right_edge = domain_right_edge
+    handler.refine_by = 2
+    handler.dimensionality = 3
+    handler.domain_dimensions = domain_dimensions
+    handler.simulation_time = sim_time
+    handler.cosmology_simulation = 0
+
+    spf = StreamStaticOutput(handler)
+    spf.units["cm"] = sim_unit_to_cm
+    spf.units['1'] = 1.0
+    spf.units["unitary"] = 1.0
+    box_in_mpc = sim_unit_to_cm / mpc_conversion['cm']
+    for unit in mpc_conversion.keys():
+        spf.units[unit] = mpc_conversion[unit] * box_in_mpc
+    return spf


diff -r e5fabbeb838531c827baa5acf54e2b4aaed84785 -r ced3cc36250bf0cf797ed7b42d56c39498ab0772 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -23,6 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+import __builtin__
 import time, types, signal, inspect, traceback, sys, pdb, os
 import contextlib
 import warnings, struct, subprocess
@@ -310,7 +311,7 @@
     maxval = max(maxval, 1)
     from yt.config import ytcfg
     if ytcfg.getboolean("yt", "suppressStreamLogging") or \
-       ytcfg.getboolean("yt", "ipython_notebook") or \
+       "__IPYTHON__" in dir(__builtin__) or \
        ytcfg.getboolean("yt", "__withintesting"):
         return DummyProgressBar()
     elif ytcfg.getboolean("yt", "__withinreason"):


diff -r e5fabbeb838531c827baa5acf54e2b4aaed84785 -r ced3cc36250bf0cf797ed7b42d56c39498ab0772 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -34,6 +34,7 @@
 from yt.testing import *
 from yt.config import ytcfg
 from yt.mods import *
+from yt.data_objects.static_output import StaticOutput
 import cPickle
 
 from yt.utilities.logger import disable_stream_logging
@@ -75,6 +76,8 @@
         ytcfg["yt","__withintesting"] = "True"
         AnswerTestingTest.result_storage = \
             self.result_storage = defaultdict(dict)
+        if options.compare_name == "SKIP":
+            options.compare_name = None
         if options.compare_name is not None:
             # Now we grab from our S3 store
             if options.compare_name == "latest":
@@ -132,6 +135,8 @@
 
 def can_run_pf(pf_fn):
     path = ytcfg.get("yt", "test_data_dir")
+    if isinstance(pf_fn, StaticOutput):
+        return AnswerTestingTest.result_storage is not None
     with temp_cwd(path):
         try:
             load(pf_fn)
@@ -141,26 +146,44 @@
 
 def data_dir_load(pf_fn):
     path = ytcfg.get("yt", "test_data_dir")
+    if isinstance(pf_fn, StaticOutput): return pf_fn
     with temp_cwd(path):
         pf = load(pf_fn)
         pf.h
         return pf
 
+def sim_dir_load(sim_fn, path = None, sim_type = "Enzo",
+                 find_outputs=False):
+    if path is None and not os.path.exists(sim_fn):
+        raise IOError
+    if os.path.exists(sim_fn) or not path:
+        path = "."
+    with temp_cwd(path):
+        return simulation(sim_fn, sim_type,
+                          find_outputs=find_outputs)
+
 class AnswerTestingTest(object):
     reference_storage = None
+    prefix = ""
     def __init__(self, pf_fn):
         self.pf = data_dir_load(pf_fn)
 
     def __call__(self):
         nv = self.run()
         if self.reference_storage is not None:
-            dd = self.reference_storage.get(str(self.pf))
+            dd = self.reference_storage.get(self.storage_name)
             if dd is None: raise YTNoOldAnswer()
             ov = dd[self.description]
             self.compare(nv, ov)
         else:
             ov = None
-        self.result_storage[str(self.pf)][self.description] = nv
+        self.result_storage[self.storage_name][self.description] = nv
+
+    @property
+    def storage_name(self):
+        if self.prefix != "":
+            return "%s_%s" % (self.prefix, self.pf)
+        return str(self.pf)
 
     def compare(self, new_result, old_result):
         raise RuntimeError
@@ -312,6 +335,22 @@
         for k in new_result:
             assert_equal(new_result[k], old_result[k])
 
+class VerifySimulationSameTest(AnswerTestingTest):
+    _type_name = "VerifySimulationSame"
+    _attrs = ()
+
+    def __init__(self, simulation_obj):
+        self.pf = simulation_obj
+
+    def run(self):
+        result = [ds.current_time for ds in self.pf]
+        return result
+
+    def compare(self, new_result, old_result):
+        assert_equal(len(new_result), len(old_result))
+        for i in range(len(new_result)):
+            assert_equal(new_result[i], old_result[i])
+        
 class GridHierarchyTest(AnswerTestingTest):
     _type_name = "GridHierarchy"
     _attrs = ()
@@ -353,6 +392,25 @@
         for newc, oldc in zip(new_result["children"], old_result["children"]):
             assert(newp == oldp)
 
+def requires_outputlog(path = ".", prefix = ""):
+    def ffalse(func):
+        return lambda: None
+    def ftrue(func):
+        @wraps(func)
+        def fyielder(*args, **kwargs):
+            with temp_cwd(path):
+                for t in func(*args, **kwargs):
+                    if isinstance(t, AnswerTestingTest):
+                        t.prefix = prefix
+                    yield t
+        return fyielder
+    if os.path.exists("OutputLog"):
+        return ftrue
+    with temp_cwd(path):
+        if os.path.exists("OutputLog"):
+            return ftrue
+    return ffalse
+
 def requires_pf(pf_fn, big_data = False):
     def ffalse(func):
         return lambda: None


diff -r e5fabbeb838531c827baa5acf54e2b4aaed84785 -r ced3cc36250bf0cf797ed7b42d56c39498ab0772 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -581,7 +581,9 @@
                     ncols, size = data.shape
             ncols = self.comm.allreduce(ncols, op=MPI.MAX)
             if ncols == 0:
-                    data = np.zeros(0, dtype=dtype) # This only works for
+                data = np.zeros(0, dtype=dtype) # This only works for
+            elif data is None:
+                data = np.zeros((ncols, 0), dtype=dtype)
             size = data.shape[-1]
             sizes = np.zeros(self.comm.size, dtype='int64')
             outsize = np.array(size, dtype='int64')
@@ -1058,3 +1060,49 @@
                 nextdim = (nextdim + 1) % 3
         return cuts
     
+class GroupOwnership(ParallelAnalysisInterface):
+    def __init__(self, items):
+        ParallelAnalysisInterface.__init__(self)
+        self.num_items = len(items)
+        self.items = items
+        assert(self.num_items >= self.comm.size)
+        self.owned = range(self.comm.size)
+        self.pointer = 0
+        if parallel_capable:
+            communication_system.push_with_ids([self.comm.rank])
+
+    def __del__(self):
+        if parallel_capable:
+            communication_system.pop()
+
+    def inc(self, n = -1):
+        old_item = self.item
+        if n == -1: n = self.comm.size
+        for i in range(n):
+            if self.pointer >= self.num_items - self.comm.size: break
+            self.owned[self.pointer % self.comm.size] += self.comm.size
+            self.pointer += 1
+        if self.item is not old_item:
+            self.switch()
+            
+    def dec(self, n = -1):
+        old_item = self.item
+        if n == -1: n = self.comm.size
+        for i in range(n):
+            if self.pointer == 0: break
+            self.owned[(self.pointer - 1) % self.comm.size] -= self.comm.size
+            self.pointer -= 1
+        if self.item is not old_item:
+            self.switch()
+
+    _last = None
+    @property
+    def item(self):
+        own = self.owned[self.comm.rank]
+        if self._last != own:
+            self._item = self.items[own]
+            self._last = own
+        return self._item
+
+    def switch(self):
+        pass


diff -r e5fabbeb838531c827baa5acf54e2b4aaed84785 -r ced3cc36250bf0cf797ed7b42d56c39498ab0772 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -375,12 +375,10 @@
 class StreamlineCallback(PlotCallback):
     _type_name = "streamlines"
     def __init__(self, field_x, field_y, factor = 16,
-                 density = 1, arrowsize = 1, arrowstyle = None,
-                 color = None, normalize = False):
+                 density = 1, plot_args=None):
         """
-        annotate_streamlines(field_x, field_y, factor = 16, density = 1,
-                             arrowsize = 1, arrowstyle = None,
-                             color = None, normalize = False):
+        annotate_streamlines(field_x, field_y, factor = 16,
+                             density = 1, plot_args=None):
 
         Add streamlines to any plot, using the *field_x* and *field_y*
         from the associated data, skipping every *factor* datapoints like
@@ -392,12 +390,8 @@
         self.bv_x = self.bv_y = 0
         self.factor = factor
         self.dens = density
-        self.arrowsize = arrowsize
-        if arrowstyle is None : arrowstyle='-|>'
-        self.arrowstyle = arrowstyle
-        if color is None : color = "#000000"
-        self.color = color
-        self.normalize = normalize
+        if plot_args is None: plot_args = {}
+        self.plot_args = plot_args
         
     def __call__(self, plot):
         x0, x1 = plot.xlim
@@ -421,15 +415,10 @@
                              plot.data[self.field_y] - self.bv_y,
                              int(nx), int(ny),
                            (x0, x1, y0, y1),).transpose()
-        X,Y = (na.linspace(xx0,xx1,nx,endpoint=True),
-                          na.linspace(yy0,yy1,ny,endpoint=True))
-        if self.normalize:
-            nn = na.sqrt(pixX**2 + pixY**2)
-            pixX /= nn
-            pixY /= nn
-        plot._axes.streamplot(X,Y, pixX, pixY, density=self.dens,
-                              arrowsize=self.arrowsize, arrowstyle=self.arrowstyle,
-                              color=self.color, norm=self.normalize)
+        X,Y = (np.linspace(xx0,xx1,nx,endpoint=True),
+                          np.linspace(yy0,yy1,ny,endpoint=True))
+        plot._axes.streamplot(X,Y, pixX, pixY, density = self.dens,
+                              **self.plot_args)
         plot._axes.set_xlim(xx0,xx1)
         plot._axes.set_ylim(yy0,yy1)
         plot._axes.hold(False)
@@ -743,13 +732,18 @@
         self.plot_args = plot_args
 
     def __call__(self, plot):
-        if len(self.pos) == 3:
+        xx0, xx1 = plot._axes.get_xlim()
+        yy0, yy1 = plot._axes.get_ylim()
+        if np.array(self.pos).shape == (3,):
             pos = (self.pos[x_dict[plot.data.axis]],
                    self.pos[y_dict[plot.data.axis]])
-        else: pos = self.pos
+        elif np.array(self.pos).shape == (2,):
+            pos = self.pos
         x,y = self.convert_to_plot(plot, pos)
         plot._axes.hold(True)
-        plot._axes.plot((x,),(y,),self.marker, **self.plot_args)
+        plot._axes.scatter(x,y, marker = self.marker, **self.plot_args)
+        plot._axes.set_xlim(xx0,xx1)
+        plot._axes.set_ylim(yy0,yy1)
         plot._axes.hold(False)
 
 class SphereCallback(PlotCallback):
@@ -815,6 +809,7 @@
 
     def __call__(self, plot):
         from matplotlib.patches import Circle
+        num = len(self.hop_output[:self.max_number])
         for halo in self.hop_output[:self.max_number]:
             size = halo.get_size()
             if size < self.min_size or size > self.max_size: continue
@@ -831,18 +826,19 @@
             (xi, yi) = (x_dict[plot.data.axis], y_dict[plot.data.axis])
 
             (center_x,center_y) = self.convert_to_plot(plot,(center[xi], center[yi]))
-            cir = Circle((center_x, center_y), radius, fill=False)
+            color = np.ones(3) * (0.4 * (num - halo.id)/ num) + 0.6
+            cir = Circle((center_x, center_y), radius, fill=False, color=color)
             plot._axes.add_patch(cir)
             if self.annotate:
                 if self.print_halo_size:
-                    plot._axes.text(center_x, center_y, "%s" % size,
-                    fontsize=self.font_size)
+                    plot._axes.text(center_x+radius, center_y+radius, "%s" % size,
+                    fontsize=self.font_size, color=color)
                 elif self.print_halo_mass:
-                    plot._axes.text(center_x, center_y, "%s" % halo.total_mass(),
-                    fontsize=self.font_size)
+                    plot._axes.text(center_x+radius, center_y+radius, "%s" % halo.total_mass(),
+                    fontsize=self.font_size, color=color)
                 else:
-                    plot._axes.text(center_x, center_y, "%s" % halo.id,
-                    fontsize=self.font_size)
+                    plot._axes.text(center_x+radius, center_y+radius, "%s" % halo.id,
+                    fontsize=self.font_size, color=color)
 
 class HopParticleCallback(PlotCallback):
     _type_name = "hop_particles"
@@ -1174,41 +1170,59 @@
           'kev': 1e-12 * 7.6e-8 / 6.03,
           'mev': 1e-15 * 7.6e-8 / 6.03,
           }
+    _bbox_dict = {'boxstyle': 'square,pad=0.6', 'fc': 'white', 'ec': 'black', 'alpha': 1.0}
 
-    def __init__(self, x, y, units=None, format="{time:.3G} {units}", **kwargs):
+    def __init__(self, x, y, units=None, format="{time:.3G} {units}", normalized=False, 
+                 bbox_dict=None, **kwargs):
         """ 
-        annotate_timestamp(x, y, units=None, format="{time:.3G} {units}", **kwargs)
+        annotate_timestamp(x, y, units=None, format="{time:.3G} {units}",
+                           normalized=False, bbox_dict=None, **kwargs)
 
         Adds the current time to the plot at point given by *x* and *y*.  If *units* 
         is given ('s', 'ms', 'ns', etc), it will convert the time to this basis.  If 
         *units* is None, it will attempt to figure out the correct value by which to 
         scale.  The *format* keyword is a template string that will be evaluated and 
-        displayed on the plot.  All other *kwargs* will be passed to the text() 
-        method on the plot axes.  See matplotlib's text() functions for more 
-        information.
+        displayed on the plot.  If *normalized* is True, *x* and *y* are interpreted 
+        as normalized plot coordinates (0,0 is lower-left and 1,1 is upper-right); 
+        otherwise *x* and *y* are assumed to be in plot coordinates.  The *bbox_dict* 
+        is an optional dict of arguments for the bbox that frames the timestamp; see 
+        matplotlib's text annotation guide for more details.  All other *kwargs* will 
+        be passed to the text() method on the plot axes.  See matplotlib's text() 
+        functions for more information.
         """
         self.x = x
         self.y = y
         self.format = format
         self.units = units
+        self.normalized = normalized
+        if bbox_dict is not None:
+            self.bbox_dict = bbox_dict
+        else:
+            self.bbox_dict = self._bbox_dict
         self.kwargs = {'color': 'w'}
         self.kwargs.update(kwargs)
 
     def __call__(self, plot):
         if self.units is None:
-            t = plot.data.pf.current_time
+            t = plot.data.pf.current_time * plot.data.pf['Time']
             scale_keys = ['as', 'fs', 'ps', 'ns', 'us', 'ms', 's']
             self.units = 's'
             for k in scale_keys:
                 if t < self._time_conv[k]:
                     break
                 self.units = k
-        t = plot.data.pf.current_time / self._time_conv[self.units.lower()]
+        t = plot.data.pf.current_time * plot.data.pf['Time'] 
+        t /= self._time_conv[self.units.lower()]
         if self.units == 'us':
             self.units = '$\\mu s$'
         s = self.format.format(time=t, units=self.units)
         plot._axes.hold(True)
-        plot._axes.text(self.x, self.y, s, **self.kwargs)
+        if self.normalized:
+            plot._axes.text(self.x, self.y, s, horizontalalignment='center',
+                            verticalalignment='center', 
+                            transform = plot._axes.transAxes, bbox=self.bbox_dict)
+        else:
+            plot._axes.text(self.x, self.y, s, bbox=self.bbox_dict, **self.kwargs)
         plot._axes.hold(False)
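
A minimal usage sketch for the extended callback above (the dataset path is
illustrative; annotate_timestamp is the calling form named in the docstring):

    from yt.mods import load, SlicePlot
    pf = load("DD0010/moving7_0010")      # hypothetical dataset
    p = SlicePlot(pf, 'z', 'Density')
    # Pin the stamp to the upper left in normalized (0-1) axes coordinates;
    # the default white bbox frames it, or pass a custom bbox_dict.
    p.annotate_timestamp(0.05, 0.95, normalized=True)
    p.save()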
 
 


diff -r e5fabbeb838531c827baa5acf54e2b4aaed84785 -r ced3cc36250bf0cf797ed7b42d56c39498ab0772 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -60,6 +60,10 @@
     axis_labels
 from yt.utilities.math_utils import \
     ortho_find
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+    GroupOwnership
+from yt.data_objects.time_series import \
+    TimeSeriesData
 
 def invalidate_data(f):
     @wraps(f)
@@ -239,6 +243,10 @@
             rendering is used during data deposition.
 
         """
+        if not hasattr(self, "pf"):
+            self.pf = data_source.pf
+            ts = self._initialize_dataset(self.pf) 
+            self.ts = ts
         self._initfinished = False
         self.center = None
         self.plots = {}
@@ -254,6 +262,34 @@
             self.set_center(center)
         self._initfinished = True
 
+    def _initialize_dataset(self, ts):
+        if not isinstance(ts, TimeSeriesData):
+            if not iterable(ts): ts = [ts]
+            ts = TimeSeriesData(ts)
+        return ts
+
+    def __iter__(self):
+        for pf in self.ts:
+            mylog.warning("Switching to %s", pf)
+            self._switch_pf(pf)
+            yield self
+
+    def piter(self, *args, **kwargs):
+        for pf in self.ts.piter(*args, **kwargs):
+            self._switch_pf(pf)
+            yield self
+
+    def _switch_pf(self, new_pf):
+        ds = self.data_source
+        name = ds._type_name
+        kwargs = dict((n, getattr(ds, n)) for n in ds._con_args)
+        new_ds = getattr(new_pf.h, name)(**kwargs)
+        self.pf = new_pf
+        self.data_source = new_ds
+        self._data_valid = self._plot_valid = False
+        self._recreate_frb()
+        self._setup_plots()
+
     def __getitem__(self, item):
         return self.plots[item]
 
@@ -273,7 +309,6 @@
             self._frb._get_data_source_fields()
         else:
             for key in old_fields: self._frb[key]
-        self.pf = self._frb.pf
         self._data_valid = True
         
     def _setup_plots(self):
@@ -783,14 +818,13 @@
             except ParseFatalException, err:
                 raise YTCannotParseFieldDisplayName(f,field_name,str(err))
 
-            try:
-                parser.parse(r'$'+md['units']+r'$')
-            except ParseFatalException, err:
-                raise YTCannotParseUnitDisplayName(f, md['units'],str(err))
-
             if md['units'] == None or md['units'] == '':
                 label = field_name
             else:
+                try:
+                    parser.parse(r'$'+md['units']+r'$')
+                except ParseFatalException, err:
+                    raise YTCannotParseUnitDisplayName(f, md['units'],str(err))
                 label = field_name+r'$\/\/('+md['units']+r')$'
 
             self.plots[f].cb.set_label(label)
@@ -987,8 +1021,12 @@
         >>> p.save('sliceplot')
         
         """
+        # This will handle time series data and controllers.
+        ts = self._initialize_dataset(pf) 
+        self.ts = ts
+        pf = self.pf = ts[0]
         axis = fix_axis(axis)
-        (bounds,center) = GetBoundsAndCenter(axis, center, width, pf)
+        (bounds, center) = GetBoundsAndCenter(axis, center, width, pf)
         slc = pf.h.slice(axis, center[axis], fields=fields)
         PWViewerMPL.__init__(self, slc, bounds, origin=origin)
         self.set_axes_unit(axes_unit)
@@ -1069,8 +1107,11 @@
         >>> p.save('sliceplot')
         
         """
+        ts = self._initialize_dataset(pf) 
+        self.ts = ts
+        pf = self.pf = ts[0]
         axis = fix_axis(axis)
-        (bounds,center) = GetBoundsAndCenter(axis,center,width,pf)
+        (bounds, center) = GetBoundsAndCenter(axis, center, width, pf)
         proj = pf.h.proj(axis,fields,weight_field=weight_field,max_level=max_level,center=center)
         PWViewerMPL.__init__(self,proj,bounds,origin=origin)
         self.set_axes_unit(axes_unit)
@@ -1492,4 +1533,3 @@
                                       norm = norm, vmin = self.zmin, 
                                       vmax = self.zmax, cmap = cmap)
         self.image.axes.ticklabel_format(scilimits=(-4,3))
-
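
A sketch of the time-series behaviour added above (output names are
illustrative; TimeSeriesData.from_filenames is assumed available, as
elsewhere in the codebase):

    from yt.mods import SlicePlot
    from yt.data_objects.time_series import TimeSeriesData
    ts = TimeSeriesData.from_filenames(["DD0010/moving7_0010",
                                        "DD0011/moving7_0011"])
    p = SlicePlot(ts, 'x', 'Density')
    for plot in p.piter():    # _switch_pf swaps each output into the plot
        plot.save()           # one image per parameter file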



https://bitbucket.org/yt_analysis/yt-3.0/changeset/5c4bcee82cc5/
changeset:   5c4bcee82cc5
branch:      yt
user:        samskillman
date:        2012-11-08 16:26:55
summary:     Adding ShockTubeTest to framework.py for Enzo testing.
affected #:  1 file

diff -r ced3cc36250bf0cf797ed7b42d56c39498ab0772 -r 5c4bcee82cc5fc730aa3c8d478db82eae801f6a9 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -452,3 +452,44 @@
                     yield PixelizedProjectionValuesTest(
                         pf_fn, axis, field, weight_field,
                         ds)
+
+class ShockTubeTest(object):
+    def __init__(self, data_file, solution_file, fields, 
+                 left_edges, right_edges, rtol, atol):
+        self.solution_file = solution_file
+        self.data_file = data_file
+        self.fields = fields
+        self.left_edges = left_edges
+        self.right_edges = right_edges
+        self.rtol = rtol
+        self.atol = atol
+
+    def __call__(self):
+        # Read in the pf
+        pf = load(self.data_file)  
+        exact = self.get_analytical_solution() 
+
+        ad = pf.h.all_data()
+        position = ad['x']
+        for k in self.fields:
+            field = ad[k]
+            for xmin, xmax in zip(self.left_edges, self.right_edges):
+                mask = (position >= xmin)*(position <= xmax)
+                exact_field = np.interp(position[mask], exact['pos'], exact[k]) 
+                # yield test vs analytical solution 
+                yield assert_allclose, field[mask], exact_field, \
+                    self.rtol, self.atol
+
+    def get_analytical_solution(self):
+        # Reads in from file 
+        pos, dens, vel, pres, inte = \
+                np.loadtxt(self.solution_file, unpack=True)
+        exact = {}
+        exact['pos'] = pos
+        exact['Density'] = dens
+        exact['x-velocity'] = vel
+        exact['Pressure'] = pres
+        exact['ThermalEnergy'] = inte
+        return exact
+
+
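
A usage sketch (file names and tolerances illustrative); each yielded tuple
is a nose-style test that calls assert_allclose(actual, expected, rtol, atol):

    def test_sod_shock_tube():
        st = ShockTubeTest("DD0001/data0001", "sod_exact.txt",
                           ["Density", "x-velocity", "Pressure"],
                           left_edges=[0.0], right_edges=[1.0],
                           rtol=1e-5, atol=1e-8)
        for test in st():
            yield test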



https://bitbucket.org/yt_analysis/yt-3.0/changeset/369da2bf6c0a/
changeset:   369da2bf6c0a
branch:      yt
user:        brittonsmith
date:        2012-11-08 17:56:14
summary:     Adding a distinct test for running with the Enzo test runner.
affected #:  1 file

diff -r 5c4bcee82cc5fc730aa3c8d478db82eae801f6a9 -r 369da2bf6c0a8fc4f045f1abebe57382648d6f14 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -453,6 +453,22 @@
                         pf_fn, axis, field, weight_field,
                         ds)
 
+def standard_small_simulation(pf_fn, fields):
+    if not can_run_pf(pf_fn): return
+    dso = [None]
+    yield GridHierarchyTest(pf_fn)
+    yield ParentageRelationshipsTest(pf_fn)
+    for field in fields:
+        yield GridValuesTest(pf_fn, field)
+        for axis in [0, 1, 2]:
+            for ds in dso:
+                for weight_field in [None, "Density"]:
+                    yield ProjectionValuesTest(
+                        pf_fn, axis, field, weight_field,
+                        ds)
+                yield FieldValuesTest(
+                        pf_fn, field, ds)
+                    
 class ShockTubeTest(object):
     def __init__(self, data_file, solution_file, fields, 
                  left_edges, right_edges, rtol, atol):
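
As with the other answer-testing helpers, the generator above is meant to be
re-yielded from a nose test module; a sketch (dataset name illustrative):

    def test_small_simulation():
        for test in standard_small_simulation("DD0010/moving7_0010",
                                              ["Density", "Temperature"]):
            yield test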



https://bitbucket.org/yt_analysis/yt-3.0/changeset/0c2ee509685b/
changeset:   0c2ee509685b
branch:      yt
user:        ngoldbaum
date:        2012-11-08 19:55:57
summary:     Fixing the clump callback.
affected #:  1 file

diff -r e936dc471751440fb68c417a6bb97d56dc3fad8e -r 0c2ee509685b9a38923bd4fa968fbe52f462a6c5 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -633,6 +633,9 @@
         y0, y1 = plot.ylim
         xx0, xx1 = plot._axes.get_xlim()
         yy0, yy1 = plot._axes.get_ylim()
+
+        extent = [xx0,xx1,yy0,yy1]
+
         plot._axes.hold(True)
 
         px_index = x_dict[plot.data.axis]
@@ -662,7 +665,7 @@
                              (x0, x1, y0, y1), 0).transpose()
             buff = np.maximum(temp, buff)
         self.rv = plot._axes.contour(buff, len(self.clumps)+1,
-                                     **self.plot_args)
+                                     extent=extent,**self.plot_args)
         plot._axes.hold(False)
 
 class ArrowCallback(PlotCallback):



https://bitbucket.org/yt_analysis/yt-3.0/changeset/26ca29ffd1d1/
changeset:   26ca29ffd1d1
branch:      yt
user:        ngoldbaum
date:        2012-11-08 19:56:28
summary:     Merging.
affected #:  1 file

diff -r 369da2bf6c0a8fc4f045f1abebe57382648d6f14 -r 26ca29ffd1d14a695209102bfc422872480e4804 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -633,6 +633,9 @@
         y0, y1 = plot.ylim
         xx0, xx1 = plot._axes.get_xlim()
         yy0, yy1 = plot._axes.get_ylim()
+
+        extent = [xx0,xx1,yy0,yy1]
+
         plot._axes.hold(True)
 
         px_index = x_dict[plot.data.axis]
@@ -662,7 +665,7 @@
                              (x0, x1, y0, y1), 0).transpose()
             buff = np.maximum(temp, buff)
         self.rv = plot._axes.contour(buff, len(self.clumps)+1,
-                                     **self.plot_args)
+                                     extent=extent,**self.plot_args)
         plot._axes.hold(False)
 
 class ArrowCallback(PlotCallback):



https://bitbucket.org/yt_analysis/yt-3.0/changeset/a42e3c57dec9/
changeset:   a42e3c57dec9
branch:      yt
user:        chiffre
date:        2012-11-07 13:38:18
summary:     Added a method to calculate gradients
affected #:  3 files

diff -r 5b89c5ee1aff0971bde76a52410df526dcb6dde0 -r a42e3c57dec91bdcb2f09c0ab43cf03127ebe1d0 yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -84,6 +84,7 @@
     ValidateSpatial, \
     ValidateGridType, \
     add_field, \
+    add_grad, \
     derived_field
 
 from particle_trajectories import \


diff -r 5b89c5ee1aff0971bde76a52410df526dcb6dde0 -r a42e3c57dec91bdcb2f09c0ab43cf03127ebe1d0 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -58,6 +58,66 @@
                 return function
             return create_function
         self[name] = DerivedField(name, function, **kwargs)
+        
+    def add_grad(self, field, **kwargs):
+        """
+        Creates the partial derivatives of a given field along x, y, and z,
+        plus the magnitude of the gradient.  The names of the new fields
+        (Grad_<field>_x, Grad_<field>_y, Grad_<field>_z, Grad_<field>) are
+        autogenerated.
+
+        """
+        sl = slice(2,None,None)
+        sr = slice(None,-2,None)
+        
+        def _gradx(f, data):
+            grad = data[field][sl,1:-1,1:-1] - data[field][sr,1:-1,1:-1]
+            grad /= 2.0*data["dx"].flat[0]
+            g = np.zeros(data[field].shape, dtype='float64')
+            g[1:-1,1:-1,1:-1] = grad
+            return g
+            
+        def _grady(f, data):
+            grad = data[field][1:-1,sl,1:-1] - data[field][1:-1,sr,1:-1]
+            grad /= 2.0*data["dy"].flat[0]
+            g = np.zeros(data[field].shape, dtype='float64')
+            g[1:-1,1:-1,1:-1] = grad
+            return g
+            
+        def _gradz(f, data):
+            grad = data[field][1:-1,1:-1,sl] - data[field][1:-1,1:-1,sr]
+            grad /= 2.0*data["dz"].flat[0]
+            g = np.zeros(data[field].shape, dtype='float64')
+            g[1:-1,1:-1,1:-1] = grad
+            return g
+        
+        d_kwargs = kwargs.copy()
+        if "display_name" in kwargs: del d_kwargs["display_name"]
+        
+        for ax in "xyz":
+            if "display_name" in kwargs:
+                disp_name = r"%s\_%s" % (kwargs["display_name"], ax)
+            else:
+                disp_name = r"\partial %s/\partial %s" % (field, ax)
+            name = "Grad_%s_%s" % (field, ax)
+            self[name] = DerivedField(name, function=eval('_grad%s' % ax),
+                         take_log=False, validators=[ValidateSpatial(1,[field])],
+                         display_name = disp_name, **d_kwargs)
+        
+        def _grad(f, data) :
+            a = np.power(data["Grad_%s_x" % field],2)
+            b = np.power(data["Grad_%s_y" % field],2)
+            c = np.power(data["Grad_%s_z" % field],2)
+            norm = np.sqrt(a+b+c)
+            return norm
+
+        if "display_name" in kwargs:
+            disp_name = kwargs["display_name"]
+        else:
+            disp_name = r"\Vert\nabla %s\Vert" % (field)   
+        name = "Grad_%s" % field           
+        self[name] = DerivedField(name, function=_grad, take_log=False,
+                                  display_name = disp_name, **d_kwargs)
+        mylog.info("Added new fields: Grad_%s_x, Grad_%s_y, Grad_%s_z, Grad_%s" \
+                   % (field, field, field, field))
 
     def has_key(self, key):
         # This gets used a lot
@@ -96,6 +156,7 @@
 
 FieldInfo = FieldInfoContainer()
 add_field = FieldInfo.add_field
+add_grad = FieldInfo.add_grad
 
 def derived_field(**kwargs):
     def inner_decorator(function):


diff -r 5b89c5ee1aff0971bde76a52410df526dcb6dde0 -r a42e3c57dec91bdcb2f09c0ab43cf03127ebe1d0 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -58,7 +58,7 @@
 from yt.data_objects.api import \
     BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \
     data_object_registry, \
-    derived_field, add_field, FieldInfo, \
+    derived_field, add_field, add_grad, FieldInfo, \
     ValidateParameter, ValidateDataField, ValidateProperty, \
     ValidateSpatial, ValidateGridType, \
     TimeSeriesData, AnalysisTask, analysis_task, \
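
A usage sketch for the new helper (dataset path illustrative); add_grad
registers Grad_<field>_{x,y,z} and the magnitude field Grad_<field>:

    from yt.mods import load, add_grad
    add_grad("Density")                  # registers the four gradient fields
    pf = load("DD0010/moving7_0010")
    dd = pf.h.all_data()
    print dd["Grad_Density"].max()       # magnitude of the density gradient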



https://bitbucket.org/yt_analysis/yt-3.0/changeset/92d18acc6204/
changeset:   92d18acc6204
branch:      yt
user:        chiffre
date:        2012-11-07 13:39:47
summary:     Added a method to calculate gradients
affected #:  1 file

diff -r a42e3c57dec91bdcb2f09c0ab43cf03127ebe1d0 -r 92d18acc62047cb618dc6f3bf23d77154c9daeae yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -246,7 +246,7 @@
         if not hasattr(self, "pf"):
             self.pf = data_source.pf
             ts = self._initialize_dataset(self.pf) 
-        self.ts = ts
+            self.ts = ts
         self._initfinished = False
         self.center = None
         self.plots = {}



https://bitbucket.org/yt_analysis/yt-3.0/changeset/1cad28f3bb60/
changeset:   1cad28f3bb60
branch:      yt
user:        MatthewTurk
date:        2012-11-08 20:30:37
summary:     Merged in chiffre/yt (pull request #328)
affected #:  3 files

diff -r 26ca29ffd1d14a695209102bfc422872480e4804 -r 1cad28f3bb6015827e9a2b3835ec4cd847b3efe2 yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -84,6 +84,7 @@
     ValidateSpatial, \
     ValidateGridType, \
     add_field, \
+    add_grad, \
     derived_field
 
 from particle_trajectories import \


diff -r 26ca29ffd1d14a695209102bfc422872480e4804 -r 1cad28f3bb6015827e9a2b3835ec4cd847b3efe2 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -58,6 +58,66 @@
                 return function
             return create_function
         self[name] = DerivedField(name, function, **kwargs)
+        
+    def add_grad(self, field, **kwargs):
+        """
+        Creates the partial derivatives of a given field along x, y, and z,
+        plus the magnitude of the gradient.  The names of the new fields
+        (Grad_<field>_x, Grad_<field>_y, Grad_<field>_z, Grad_<field>) are
+        autogenerated.
+
+        """
+        sl = slice(2,None,None)
+        sr = slice(None,-2,None)
+        
+        def _gradx(f, data):
+            grad = data[field][sl,1:-1,1:-1] - data[field][sr,1:-1,1:-1]
+            grad /= 2.0*data["dx"].flat[0]
+            g = np.zeros(data[field].shape, dtype='float64')
+            g[1:-1,1:-1,1:-1] = grad
+            return g
+            
+        def _grady(f, data):
+            grad = data[field][1:-1,sl,1:-1] - data[field][1:-1,sr,1:-1]
+            grad /= 2.0*data["dy"].flat[0]
+            g = np.zeros(data[field].shape, dtype='float64')
+            g[1:-1,1:-1,1:-1] = grad
+            return g
+            
+        def _gradz(f, data):
+            grad = data[field][1:-1,1:-1,sl] - data[field][1:-1,1:-1,sr]
+            grad /= 2.0*data["dz"].flat[0]
+            g = np.zeros(data[field].shape, dtype='float64')
+            g[1:-1,1:-1,1:-1] = grad
+            return g
+        
+        d_kwargs = kwargs.copy()
+        if "display_name" in kwargs: del d_kwargs["display_name"]
+        
+        for ax in "xyz":
+            if "display_name" in kwargs:
+                disp_name = r"%s\_%s" % (kwargs["display_name"], ax)
+            else:
+                disp_name = r"\partial %s/\partial %s" % (field, ax)
+            name = "Grad_%s_%s" % (field, ax)
+            self[name] = DerivedField(name, function=eval('_grad%s' % ax),
+                         take_log=False, validators=[ValidateSpatial(1,[field])],
+                         display_name = disp_name, **d_kwargs)
+        
+        def _grad(f, data) :
+            a = np.power(data["Grad_%s_x" % field],2)
+            b = np.power(data["Grad_%s_y" % field],2)
+            c = np.power(data["Grad_%s_z" % field],2)
+            norm = np.sqrt(a+b+c)
+            return norm
+
+        if "display_name" in kwargs:
+            disp_name = kwargs["display_name"]
+        else:
+            disp_name = r"\Vert\nabla %s\Vert" % (field)   
+        name = "Grad_%s" % field           
+        self[name] = DerivedField(name, function=_grad, take_log=False,
+                                  display_name = disp_name, **d_kwargs)
+        mylog.info("Added new fields: Grad_%s_x, Grad_%s_y, Grad_%s_z, Grad_%s" \
+                   % (field, field, field, field))
 
     def has_key(self, key):
         # This gets used a lot
@@ -96,6 +156,7 @@
 
 FieldInfo = FieldInfoContainer()
 add_field = FieldInfo.add_field
+add_grad = FieldInfo.add_grad
 
 def derived_field(**kwargs):
     def inner_decorator(function):


diff -r 26ca29ffd1d14a695209102bfc422872480e4804 -r 1cad28f3bb6015827e9a2b3835ec4cd847b3efe2 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -58,7 +58,7 @@
 from yt.data_objects.api import \
     BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \
     data_object_registry, \
-    derived_field, add_field, FieldInfo, \
+    derived_field, add_field, add_grad, FieldInfo, \
     ValidateParameter, ValidateDataField, ValidateProperty, \
     ValidateSpatial, ValidateGridType, \
     TimeSeriesData, AnalysisTask, analysis_task, \



https://bitbucket.org/yt_analysis/yt-3.0/changeset/20d4130210bd/
changeset:   20d4130210bd
branch:      yt
user:        ngoldbaum
date:        2012-11-02 22:48:55
summary:     The font size for the axis, colorbar, and tick labels is now controllable via a fontsize keyword argument.
Choosing a default that's somewhat bigger than the matplotlib defaults.
affected #:  1 file

diff -r 8e2150167715b63ab413670576ea18a08e9f72bd -r 20d4130210bdc8503bd1a895fda4b38fc2c99a6e yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -213,7 +213,7 @@
     _vector_info = None
     _frb = None
     def __init__(self, data_source, bounds, buff_size=(800,800), antialias=True, 
-                 periodic=True, origin='center-window', oblique=False):
+                 periodic=True, origin='center-window', oblique=False, fontsize=15):
         r"""
         PlotWindow(data_source, bounds, buff_size=(800,800), antialias = True)
         
@@ -253,6 +253,7 @@
         self.antialias = True
         self.set_window(bounds) # this automatically updates the data and plot
         self.origin = origin
+        self.fontsize = fontsize
         if self.data_source.center is not None and oblique == False:
             center = [self.data_source.center[i] for i in range(len(self.data_source.center)) if i != self.data_source.axis]
             self.set_center(center)
@@ -798,8 +799,10 @@
                 labels = [r'$\rm{Image\/x'+axes_unit_label+'}$',
                           r'$\rm{Image\/y'+axes_unit_label+'}$']
 
-            self.plots[f].axes.set_xlabel(labels[0])
-            self.plots[f].axes.set_ylabel(labels[1])
+            self.plots[f].axes.set_xlabel(labels[0],fontsize=self.fontsize)
+            self.plots[f].axes.set_ylabel(labels[1],fontsize=self.fontsize)
+
+            self.plots[f].axes.tick_params(labelsize=self.fontsize)
 
             field_name = self.data_source.pf.field_info[f].display_name
 
@@ -823,7 +826,9 @@
                     raise YTCannotParseUnitDisplayName(f, md['units'],str(err))
                 label = field_name+r'$\/\/('+md['units']+r')$'
 
-            self.plots[f].cb.set_label(label)
+            self.plots[f].cb.set_label(label,fontsize=self.fontsize)
+
+            self.plots[f].cb.ax.tick_params(labelsize=self.fontsize)
 
             self.run_callbacks(f)
 
@@ -950,7 +955,7 @@
     _frb_generator = FixedResolutionBuffer
 
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
-                 origin='center-window'):
+                 origin='center-window', fontsize=15):
         r"""Creates a slice plot from a parameter file
         
         Given a pf object, an axis to slice along, and a field name
@@ -1006,6 +1011,8 @@
              to the bottom-left hand corner of the simulation domain, 'center-domain',
              corresponding to the center of the simulation domain, or 'center-window' for 
              the center of the plot window.
+        fontsize : integer
+             The size of the fonts for the axis, colorbar, and tick labels.
              
         Examples
         --------
@@ -1032,7 +1039,7 @@
     _frb_generator = FixedResolutionBuffer
 
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
-                 weight_field=None, max_level=None, origin='center-window'):
+                 weight_field=None, max_level=None, origin='center-window', fontsize=15):
         r"""Creates a projection plot from a parameter file
         
         Given a pf object, an axis to project along, and a field name
@@ -1092,6 +1099,8 @@
             The name of the weighting field.  Set to None for no weight.
         max_level: int
             The maximum level to project to.
+        fontsize : integer
+             The size of the fonts for the axis, colorbar, and tick labels.
         
         Examples
         --------
@@ -1117,7 +1126,7 @@
     _frb_generator = ObliqueFixedResolutionBuffer
 
     def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
-                 axes_unit=None, north_vector=None):
+                 axes_unit=None, north_vector=None, fontsize=15):
         r"""Creates an off axis slice plot from a parameter file
 
         Given a pf object, a normal vector defining a slicing plane, and
@@ -1154,7 +1163,8 @@
             A vector defining the 'up' direction in the plot.  This
             option sets the orientation of the slicing plane.  If not
             set, an arbitrary grid-aligned north-vector is chosen.
-
+        fontsize : integer
+             The size of the fonts for the axis, colorbar, and tick labels.
         """
         (bounds,center_rot) = GetOffAxisBoundsAndCenter(normal,center,width,pf)
         cutting = pf.h.cutting(normal,center,fields=fields,north_vector=north_vector)
@@ -1193,7 +1203,7 @@
     def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
                  depth=(1,'unitary'), axes_unit=None, weight_field=None, 
                  max_level=None, north_vector=None, volume=None, no_ghost=False, 
-                 le=None, re=None, interpolated=False):
+                 le=None, re=None, interpolated=False, fontsize=15):
         r"""Creates an off axis projection plot from a parameter file
 
         Given a pf object, a normal vector to project along, and
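
The keyword threads through all four plot types identically; a sketch
(dataset path illustrative):

    from yt.mods import load, SlicePlot
    pf = load("DD0010/moving7_0010")
    # Axis labels, the colorbar label, and both sets of tick labels all
    # pick up the same size.
    p = SlicePlot(pf, 'z', 'Density', fontsize=18)
    p.save('slice_bigfont')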



https://bitbucket.org/yt_analysis/yt-3.0/changeset/b9d9aca343f5/
changeset:   b9d9aca343f5
branch:      yt
user:        MatthewTurk
date:        2012-11-08 20:32:13
summary:     Merged in ngoldbaum/yt (pull request #326)
affected #:  1 file

diff -r 1cad28f3bb6015827e9a2b3835ec4cd847b3efe2 -r b9d9aca343f57f3708754a0043c0a4f4f3b72b35 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -213,7 +213,7 @@
     _vector_info = None
     _frb = None
     def __init__(self, data_source, bounds, buff_size=(800,800), antialias=True, 
-                 periodic=True, origin='center-window', oblique=False):
+                 periodic=True, origin='center-window', oblique=False, fontsize=15):
         r"""
         PlotWindow(data_source, bounds, buff_size=(800,800), antialias = True)
         
@@ -257,6 +257,7 @@
         self.antialias = True
         self.set_window(bounds) # this automatically updates the data and plot
         self.origin = origin
+        self.fontsize = fontsize
         if self.data_source.center is not None and oblique == False:
             center = [self.data_source.center[i] for i in range(len(self.data_source.center)) if i != self.data_source.axis]
             self.set_center(center)
@@ -802,8 +803,10 @@
                 labels = [r'$\rm{Image\/x'+axes_unit_label+'}$',
                           r'$\rm{Image\/y'+axes_unit_label+'}$']
 
-            self.plots[f].axes.set_xlabel(labels[0])
-            self.plots[f].axes.set_ylabel(labels[1])
+            self.plots[f].axes.set_xlabel(labels[0],fontsize=self.fontsize)
+            self.plots[f].axes.set_ylabel(labels[1],fontsize=self.fontsize)
+
+            self.plots[f].axes.tick_params(labelsize=self.fontsize)
 
             field_name = self.data_source.pf.field_info[f].display_name
 
@@ -827,7 +830,9 @@
                     raise YTCannotParseUnitDisplayName(f, md['units'],str(err))
                 label = field_name+r'$\/\/('+md['units']+r')$'
 
-            self.plots[f].cb.set_label(label)
+            self.plots[f].cb.set_label(label,fontsize=self.fontsize)
+
+            self.plots[f].cb.ax.tick_params(labelsize=self.fontsize)
 
             self.run_callbacks(f)
 
@@ -954,7 +959,7 @@
     _frb_generator = FixedResolutionBuffer
 
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
-                 origin='center-window'):
+                 origin='center-window', fontsize=15):
         r"""Creates a slice plot from a parameter file
         
         Given a pf object, an axis to slice along, and a field name
@@ -1010,6 +1015,8 @@
              to the bottom-left hand corner of the simulation domain, 'center-domain',
              corresponding to the center of the simulation domain, or 'center-window' for 
              the center of the plot window.
+        fontsize : integer
+             The size of the fonts for the axis, colorbar, and tick labels.
              
         Examples
         --------
@@ -1036,7 +1043,7 @@
     _frb_generator = FixedResolutionBuffer
 
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
-                 weight_field=None, max_level=None, origin='center-window'):
+                 weight_field=None, max_level=None, origin='center-window', fontsize=15):
         r"""Creates a projection plot from a parameter file
         
         Given a pf object, an axis to project along, and a field name
@@ -1096,6 +1103,8 @@
             The name of the weighting field.  Set to None for no weight.
         max_level: int
             The maximum level to project to.
+        fontsize : integer
+             The size of the fonts for the axis, colorbar, and tick labels.
         
         Examples
         --------
@@ -1121,7 +1130,7 @@
     _frb_generator = ObliqueFixedResolutionBuffer
 
     def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
-                 axes_unit=None, north_vector=None):
+                 axes_unit=None, north_vector=None, fontsize=15):
         r"""Creates an off axis slice plot from a parameter file
 
         Given a pf object, a normal vector defining a slicing plane, and
@@ -1158,7 +1167,8 @@
             A vector defining the 'up' direction in the plot.  This
             option sets the orientation of the slicing plane.  If not
             set, an arbitrary grid-aligned north-vector is chosen.
-
+        fontsize : integer
+             The size of the fonts for the axis, colorbar, and tick labels.
         """
         (bounds,center_rot) = GetOffAxisBoundsAndCenter(normal,center,width,pf)
         cutting = pf.h.cutting(normal,center,fields=fields,north_vector=north_vector)
@@ -1197,7 +1207,7 @@
     def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
                  depth=(1,'unitary'), axes_unit=None, weight_field=None, 
                  max_level=None, north_vector=None, volume=None, no_ghost=False, 
-                 le=None, re=None, interpolated=False):
+                 le=None, re=None, interpolated=False, fontsize=15):
         r"""Creates an off axis projection plot from a parameter file
 
         Given a pf object, a normal vector to project along, and



https://bitbucket.org/yt_analysis/yt-3.0/changeset/5c6832662c1d/
changeset:   5c6832662c1d
branch:      yt
user:        MatthewTurk
date:        2012-10-04 03:44:21
summary:     Adding some routines for refinement, including identifying subgrids.
affected #:  2 files

diff -r 59154579b465a3bc1d01723d7b8680526d4b784d -r 5c6832662c1d74536cdc4cfa483be91a77dd7565 yt/utilities/flagging_methods.py
--- a/yt/utilities/flagging_methods.py
+++ b/yt/utilities/flagging_methods.py
@@ -27,12 +27,6 @@
 
 flagging_method_registry = {}
 
-def flag_cells(grid, methods):
-    flagged = np.zeros(grid.ActiveDimensions, dtype="bool")
-    for method in methods:
-        flagged |= method(grid)
-    return flagged
-
 class FlaggingMethod(object):
     _skip_add = False
     class __metaclass__(type):
@@ -46,6 +40,57 @@
     def __init__(self, over_density):
         self.over_density = over_density
 
-    def __call__(self, pf, grid):
-        rho = grid["Density"] / (pf.refine_by**grid.Level)
+    def __call__(self, grid):
+        rho = grid["Density"] / (grid.pf.refine_by**grid.Level)
         return (rho > self.over_density)
+
+class FlaggingGrid(object):
+    def __init__(self, grid, methods):
+        self.grid = grid
+        self.sigs = []
+        flagged = np.zeros(self.grid.ActiveDimensions, dtype="bool")
+        for method in methods:
+            flagged |= method(self.grid)
+        for dim in range(3):
+            d1 = (dim + 1) % 3
+            d2 = (dim == 0)
+            self.sigs.append(flagged.sum(axis=d1).sum(axis=d2))
+        self.flagged = flagged
+
+    def find_by_zero_signature(self, dim):
+        ge = []
+        for dim in range(3):
+            sig = self.sigs[dim]
+            grid_ends = np.zeros((sig.size, 2))
+            ng = 0
+            i = 0
+            while i < sig.size:
+                if sig[i] != 0:
+                    grid_ends[ng, 0] = i
+                    while i < sig.size and sig[i] != 0:
+                        i += 1
+                    grid_ends[ng, 1] = i - 1
+                    ng += 1
+                i += 1
+            ge.append(grid_ends[:ng,:])
+        return ge
+
+    def find_by_second_derivative(flagged, dim):
+        ze = []
+        for dim in range(3):
+            sig = self.sigs[dim]
+            sd = sig[:-2] - 2.0*sig[1:-1] + sig[2:]
+            grid_ends = np.zeros((sig.size, 2))
+            ng = 0
+            center = int((flagged.shape[dim] - 1) / 2)
+            strength = zero_strength = 0
+            for i in range(1, sig.size-1):
+                # Note that sd is offset by one
+                if sd[i-1] * sd[i] < 0:
+                    strength = np.abs(sd[i-1] - sd[i])
+                    if strength > zero_strength or \
+                       (strength == zero_strength and np.abs(center - i) < np.abs(zero_cross - i)):
+                        zero_strength = strength
+                        zero_cross = i
+            ze.append(zero_cross)
+        return ze


diff -r 59154579b465a3bc1d01723d7b8680526d4b784d -r 5c6832662c1d74536cdc4cfa483be91a77dd7565 yt/utilities/initial_conditions.py
--- /dev/null
+++ b/yt/utilities/initial_conditions.py
@@ -0,0 +1,42 @@
+"""
+Painting zones in a grid
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+class TopHatSphere(object):
+    def __init__(self, radius, center, value, field = "Density"):
+        self.radius = radius
+        self.center = center
+        self.value = value
+        self.field = field
+        
+    def __call__(self, grid):
+        r = np.zeros(grid.ActiveDimensions, dtype="float64")
+        for i, ax in enumerate("xyz"):
+            np.add(r, (grid[ax] - self.center[i])**2.0, r)
+        np.sqrt(r, r)
+        ind = (r <= self.radius)
+        grid[self.field][ind] += self.value
+
+    def apply(self, pf):
+        for g in pf.h.grids: self(g)
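
A sketch of the new pieces together (dataset path and threshold illustrative;
OverDensity is assumed to be the flagging method whose __init__ appears
earlier in flagging_methods.py):

    from yt.mods import load
    from yt.utilities.flagging_methods import FlaggingGrid, OverDensity
    from yt.utilities.initial_conditions import TopHatSphere
    pf = load("DD0010/moving7_0010")
    # Paint a constant value into every grid inside the sphere.
    TopHatSphere(0.1, [0.5, 0.5, 0.5], 10.0, field="Density").apply(pf)
    # Flag over-dense zones on one grid and look for subgrid boundaries.
    fg = FlaggingGrid(pf.h.grids[0], [OverDensity(8.0)])
    edges = fg.find_by_zero_signature()  # per-axis (start, end) index pairs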



https://bitbucket.org/yt_analysis/yt-3.0/changeset/480cfbd60e2d/
changeset:   480cfbd60e2d
branch:      yt
user:        MatthewTurk
date:        2012-10-05 06:03:46
summary:     Sketching out a bit more of the data modifiers, and adding a random fluctuation
affected #:  2 files

diff -r 5c6832662c1d74536cdc4cfa483be91a77dd7565 -r 480cfbd60e2db8773d03675928c1ebebce6b7e4c yt/utilities/flagging_methods.py
--- a/yt/utilities/flagging_methods.py
+++ b/yt/utilities/flagging_methods.py
@@ -57,7 +57,7 @@
             self.sigs.append(flagged.sum(axis=d1).sum(axis=d2))
         self.flagged = flagged
 
-    def find_by_zero_signature(self, dim):
+    def find_by_zero_signature(self):
         ge = []
         for dim in range(3):
             sig = self.sigs[dim]
@@ -75,14 +75,14 @@
             ge.append(grid_ends[:ng,:])
         return ge
 
-    def find_by_second_derivative(flagged, dim):
+    def find_by_second_derivative(self):
         ze = []
         for dim in range(3):
             sig = self.sigs[dim]
             sd = sig[:-2] - 2.0*sig[1:-1] + sig[2:]
             grid_ends = np.zeros((sig.size, 2))
             ng = 0
-            center = int((flagged.shape[dim] - 1) / 2)
+            center = int((self.flagged.shape[dim] - 1) / 2)
             strength = zero_strength = 0
             for i in range(1, sig.size-1):
                 # Note that sd is offset by one


diff -r 5c6832662c1d74536cdc4cfa483be91a77dd7565 -r 480cfbd60e2db8773d03675928c1ebebce6b7e4c yt/utilities/initial_conditions.py
--- a/yt/utilities/initial_conditions.py
+++ b/yt/utilities/initial_conditions.py
@@ -23,20 +23,37 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-class TopHatSphere(object):
-    def __init__(self, radius, center, value, field = "Density"):
+import numpy as np
+
+class DataModifier(object):
+    def apply(self, pf):
+        for g in pf.h.grids: self(g)
+
+class TopHatSphere(DataModifier):
+    def __init__(self, radius, center, fields):
         self.radius = radius
         self.center = center
-        self.value = value
-        self.field = field
+        self.fields = fields
         
-    def __call__(self, grid):
+    def __call__(self, grid, sub_select = None):
         r = np.zeros(grid.ActiveDimensions, dtype="float64")
         for i, ax in enumerate("xyz"):
             np.add(r, (grid[ax] - self.center[i])**2.0, r)
         np.sqrt(r, r)
         ind = (r <= self.radius)
-        grid[self.field][ind] += self.value
+        if sub_select is not None:
+            ind &= sub_select
+        for field, val in self.fields.iteritems():
+            grid[field][r < self.radius] = val
 
-    def apply(self, pf):
-        for g in pf.h.grids: self(g)
+class RandomFluctuation(DataModifier):
+    def __init__(self, fields):
+        self.fields = fields
+
+    def __call__(self, grid, sub_select = None):
+        if sub_select is None:
+            sub_select = Ellipsis
+        for field, mag in self.fields.iteritems():
+            vals = grid[field][sub_select]
+            rc = 1.0 + (np.random.random(vals.shape) - 0.5) * mag
+            grid[field][sub_select] *= rc
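
A sketch combining the two modifiers as they now stand (values illustrative):

    from yt.mods import load
    from yt.utilities.initial_conditions import TopHatSphere, RandomFluctuation
    pf = load("DD0010/moving7_0010")
    # Set Density and Temperature inside the sphere, then perturb Density
    # everywhere by up to +/- 5 per cent.
    TopHatSphere(0.1, [0.5, 0.5, 0.5],
                 {"Density": 10.0, "Temperature": 1e4}).apply(pf)
    RandomFluctuation({"Density": 0.05}).apply(pf)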



https://bitbucket.org/yt_analysis/yt-3.0/changeset/173eb46cbdd3/
changeset:   173eb46cbdd3
branch:      yt
user:        MatthewTurk
date:        2012-10-11 23:53:24
summary:     A few more changes to the FluidOperator stuff for initial conditions
affected #:  1 file

diff -r 5c6832662c1d74536cdc4cfa483be91a77dd7565 -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a yt/utilities/initial_conditions.py
--- a/yt/utilities/initial_conditions.py
+++ b/yt/utilities/initial_conditions.py
@@ -23,7 +23,11 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-class TopHatSphere(object):
+class FluidOperator(object):
+    def apply(self, pf):
+        for g in pf.h.grids: self(g)
+
+class TopHatSphere(FluidOperator):
     def __init__(self, radius, center, value, field = "Density"):
         self.radius = radius
         self.center = center
@@ -37,6 +41,3 @@
         np.sqrt(r, r)
         ind = (r <= self.radius)
         grid[self.field][ind] += self.value
-
-    def apply(self, pf):
-        for g in pf.h.grids: self(g)



https://bitbucket.org/yt_analysis/yt-3.0/changeset/e7456093b8db/
changeset:   e7456093b8db
branch:      yt
user:        MatthewTurk
date:        2012-11-02 12:18:03
summary:     Merging from tip
affected #:  59 files

diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,3 +1,3 @@
-include distribute_setup.py
+include distribute_setup.py README* CREDITS FUNDING LICENSE.txt
 recursive-include yt/gui/reason/html *.html *.png *.ico *.js
-recursive-include yt *.pyx *.pxd *.hh *.h README* 
+recursive-include yt *.pyx *.pxd *.hh *.h README*


diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -220,11 +220,24 @@
         echo "  * libncurses5-dev"
         echo "  * zip"
         echo "  * uuid-dev"
+        echo "  * libfreetype6-dev"
+        echo "  * tk-dev"
         echo
         echo "You can accomplish this by executing:"
         echo
-        echo "$ sudo apt-get install libssl-dev build-essential libncurses5 libncurses5-dev zip uuid-dev"
+        echo "$ sudo apt-get install libssl-dev build-essential libncurses5 libncurses5-dev zip uuid-dev libfreetype6-dev tk-dev"
         echo
+        echo
+        echo " Additionally, if you want to put yt's lib dir in your LD_LIBRARY_PATH"
+        echo " so you can use yt without the activate script, you might "
+        echo " want to consider turning off LIBZ and FREETYPE in this"
+        echo " install script by editing this file and setting"
+        echo
+        echo " INST_ZLIB=0"
+        echo " INST_FTYPE=0"
+        echo 
+        echo " to avoid conflicts with other command-line programs "
+        echo " (like eog and evince, for example)."
     fi
     if [ ! -z "${CFLAGS}" ]
     then
@@ -400,7 +413,7 @@
 
 echo '2c1933ab31246b4f4eba049d3288156e0a72f1730604e3ed7357849967cdd329e4647cf236c9442ecfb06d0aff03e6fc892a7ba2a5c1cf5c011b7ab9c619acec  Cython-0.16.tar.gz' > Cython-0.16.tar.gz.sha512
 echo '44eea803870a66ff0bab08d13a8b3388b5578ebc1c807d1d9dca0a93e6371e91b15d02917a00b3b20dc67abb5a21dabaf9b6e9257a561f85eeff2147ac73b478  PyX-0.11.1.tar.gz' > PyX-0.11.1.tar.gz.sha512
-echo '1a754d560bfa433f0960ab3b5a62edb5f291be98ec48cf4e5941fa5b84139e200b87a52efbbd6fa4a76d6feeff12439eed3e7a84db4421940d1bbb576f7a684e  Python-2.7.2.tgz' > Python-2.7.2.tgz.sha512
+echo 'b981f8464575bb24c297631c87a3b9172312804a0fc14ce1fa7cb41ce2b0d2fd383cd1c816d6e10c36467d18bf9492d6faf557c81c04ff3b22debfa93f30ad0b  Python-2.7.3.tgz' > Python-2.7.3.tgz.sha512
 echo 'c017d3d59dd324ac91af0edc178c76b60a5f90fbb775cf843e39062f95bd846238f2c53705f8890ed3f34bc0e6e75671a73d13875eb0287d6201cb45f0a2d338  bzip2-1.0.5.tar.gz' > bzip2-1.0.5.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
 echo 'b519218f93946400326e9b656669269ecb3e5232b944e18fbc3eadc4fe2b56244d68aae56d6f69042b4c87c58c881ee2aaa279561ea0f0f48d5842155f4de9de  freetype-2.4.4.tar.gz' > freetype-2.4.4.tar.gz.sha512
@@ -429,7 +442,7 @@
 [ $INST_0MQ -eq 1 ] && get_ytproject zeromq-2.2.0.tar.gz
 [ $INST_0MQ -eq 1 ] && get_ytproject pyzmq-2.1.11.tar.gz
 [ $INST_0MQ -eq 1 ] && get_ytproject tornado-2.2.tar.gz
-get_ytproject Python-2.7.2.tgz
+get_ytproject Python-2.7.3.tgz
 get_ytproject numpy-1.6.1.tar.gz
 get_ytproject matplotlib-1.1.0.tar.gz
 get_ytproject mercurial-2.2.2.tar.gz
@@ -554,11 +567,11 @@
     fi
 fi
 
-if [ ! -e Python-2.7.2/done ]
+if [ ! -e Python-2.7.3/done ]
 then
     echo "Installing Python.  This may take a while, but don't worry.  YT loves you."
-    [ ! -e Python-2.7.2 ] && tar xfz Python-2.7.2.tgz
-    cd Python-2.7.2
+    [ ! -e Python-2.7.3 ] && tar xfz Python-2.7.3.tgz
+    cd Python-2.7.3
     ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
 
     ( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit


diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d nose.cfg
--- /dev/null
+++ b/nose.cfg
@@ -0,0 +1,4 @@
+[nosetests]
+detailed-errors=1
+where=yt
+exclude=answer_testing


diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,3 +1,9 @@
 [egg_info]
 #tag_build = .dev
 #tag_svn_revision = 1
+
+[nosetests]
+detailed-errors=1
+where=yt
+exclude=answer_testing
+with-xunit=1


diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d setup.py
--- a/setup.py
+++ b/setup.py
@@ -154,7 +154,11 @@
             'amr adaptivemeshrefinement',
         entry_points={'console_scripts': [
                             'yt = yt.utilities.command_line:run_main',
-                       ]},
+                      ],
+                      'nose.plugins.0.10': [
+                            'answer-testing = yt.utilities.answer_testing.framework:AnswerTesting'
+                      ]
+        },
         author="Matthew J. Turk",
         author_email="matthewturk at gmail.com",
         url="http://yt-project.org/",


diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
@@ -31,7 +31,7 @@
 from yt.funcs import *
 from yt.utilities.performance_counters import yt_counters, time_function
 try:
-    from yt.utilities.kdtree import \
+    from yt.utilities.kdtree.api import \
         chainHOP_tags_dens, \
         create_tree, fKD, find_nn_nearest_neighbors, \
         free_tree, find_chunk_nearest_neighbors


diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -606,6 +606,7 @@
 
         if newProfile:
             mylog.info("Writing halo %d" % halo['id'])
+            if os.path.exists(filename): os.remove(filename)
             if filename.endswith('.h5'):
                 profile.write_out_h5(filename)
             else:
@@ -717,7 +718,9 @@
             Default=True.
         njobs : int
             The number of jobs over which to split the projections.  Set
-            to -1 so that each halo is done by a single processor.
+            to -1 so that each halo is done by a single processor.  Halo 
+            projections do not currently work in parallel, so this must 
+            be set to -1.
             Default: -1.
         dynamic : bool
             If True, distribute halos using a task queue.  If False,
@@ -731,6 +734,12 @@
 
         """
 
+        # Halo projections cannot run in parallel because they are done by 
+        # giving a data source to the projection object.
+        if njobs > 0:
+            mylog.warn("Halo projections cannot use more than one processor per halo, setting njobs to -1.")
+            njobs = -1
+        
         # Get list of halos for projecting.
         if halo_list == 'filtered':
             halo_projection_list = self.filtered_halos


diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/analysis_modules/two_point_functions/two_point_functions.py
--- a/yt/analysis_modules/two_point_functions/two_point_functions.py
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py
@@ -30,7 +30,7 @@
 from yt.utilities.parallel_tools.parallel_analysis_interface import ParallelAnalysisInterface, parallel_blocking_call, parallel_root_only
 
 try:
-    from yt.utilities.kdtree import *
+    from yt.utilities.kdtree.api import *
 except ImportError:
     mylog.debug("The Fortran kD-Tree did not import correctly.")
 


diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -38,6 +38,7 @@
     inline = 'False',
     numthreads = '-1',
     __withinreason = 'False',
+    __withintesting = 'False',
     __parallel = 'False',
     __global_parallel_rank = '0',
     __global_parallel_size = '1',
@@ -53,6 +54,7 @@
     pasteboard_repo = '',
     reconstruct_hierarchy = 'False',
     test_storage_dir = '/does/not/exist',
+    test_data_dir = '/does/not/exist',
     enzo_db = '',
     hub_url = 'https://hub.yt-project.org/upload',
     hub_api_key = '',


diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -65,6 +65,9 @@
     quantity_info, \
     add_quantity
 
+from image_array import \
+    ImageArray
+
 from field_info_container import \
     FieldInfoContainer, \
     FieldInfo, \


diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -5,7 +5,7 @@
 Affiliation: KIPAC/SLAC/Stanford
 Author: Britton Smith <Britton.Smith at colorado.edu>
 Affiliation: University of Colorado at Boulder
-Author: Geoffrey So <gsiisg at gmail.com> (AMREllipsoidBase)
+Author: Geoffrey So <gsiisg at gmail.com>
 Affiliation: UCSD Physics/CASS
 Homepage: http://yt-project.org/
 License:
@@ -71,7 +71,7 @@
 def force_array(item, shape):
     try:
         sh = item.shape
-        return item
+        return item.copy()
     except AttributeError:
         if item:
             return np.ones(shape, dtype='bool')
@@ -237,6 +237,7 @@
     def __set_default_field_parameters(self):
         self.set_field_parameter("center",np.zeros(3,dtype='float64'))
         self.set_field_parameter("bulk_velocity",np.zeros(3,dtype='float64'))
+        self.set_field_parameter("normal",np.array([0,0,1],dtype='float64'))
 
     def _set_center(self, center):
         if center is None:
@@ -708,7 +709,7 @@
     _type_name = "streamline"
     _con_args = ('positions')
     sort_by = 't'
-    def __init__(self, positions, fields=None, pf=None, **kwargs):
+    def __init__(self, positions, length = 1.0, fields=None, pf=None, **kwargs):
         """
         This is a streamline, which is a set of points defined as
         being parallel to some vector field.
@@ -724,6 +725,8 @@
         ----------
         positions : array-like
             List of streamline positions
+        length : float
+            The magnitude of the distance; dts will be divided by this
         fields : list of strings, optional
             If you want the object to pre-retrieve a set of fields, supply them
             here.  This is not necessary.
@@ -748,7 +751,9 @@
         self.dts = np.empty_like(positions[:,0])
         self.dts[:-1] = np.sqrt(np.sum((self.positions[1:]-
                                         self.positions[:-1])**2,axis=1))
-        self.dts[-1] = self.dts[-1]
+        self.dts[-1] = self.dts[-2]
+        self.length = length
+        self.dts /= length
         self.ts = np.add.accumulate(self.dts)
         self._set_center(self.positions[0])
         self.set_field_parameter('center', self.positions[0])
@@ -767,31 +772,30 @@
 
     @restore_grid_state
     def _get_data_from_grid(self, grid, field):
-        mask = np.logical_and(self._get_cut_mask(grid),
-                              grid.child_mask)
-        if field == 'dts': return self._dts[grid.id][mask]
-        if field == 't': return self._ts[grid.id][mask]
-        return grid[field][mask]
+        # No child masking here; it happens inside the mask cut
+        mask = self._get_cut_mask(grid) 
+        if field == 'dts': return self._dts[grid.id]
+        if field == 't': return self._ts[grid.id]
+        return grid[field].flat[mask]
         
     @cache_mask
     def _get_cut_mask(self, grid):
-        mask = np.zeros(grid.ActiveDimensions, dtype='int')
-        dts = np.zeros(grid.ActiveDimensions, dtype='float64')
-        ts = np.zeros(grid.ActiveDimensions, dtype='float64')
         #pdb.set_trace()
         points_in_grid = np.all(self.positions > grid.LeftEdge, axis=1) & \
                          np.all(self.positions <= grid.RightEdge, axis=1) 
         pids = np.where(points_in_grid)[0]
-        for i, pos in zip(pids, self.positions[points_in_grid]):
+        mask = np.zeros(points_in_grid.sum(), dtype='int')
+        dts = np.zeros(points_in_grid.sum(), dtype='float64')
+        ts = np.zeros(points_in_grid.sum(), dtype='float64')
+        for mi, (i, pos) in enumerate(zip(pids, self.positions[points_in_grid])):
             if not points_in_grid[i]: continue
             ci = ((pos - grid.LeftEdge)/grid.dds).astype('int')
+            if grid.child_mask[ci[0], ci[1], ci[2]] == 0: continue
             for j in range(3):
                 ci[j] = min(ci[j], grid.ActiveDimensions[j]-1)
-            if mask[ci[0], ci[1], ci[2]]:
-                continue
-            mask[ci[0], ci[1], ci[2]] = 1
-            dts[ci[0], ci[1], ci[2]] = self.dts[i]
-            ts[ci[0], ci[1], ci[2]] = self.ts[i]
+            mask[mi] = np.ravel_multi_index(ci, grid.ActiveDimensions)
+            dts[mi] = self.dts[i]
+            ts[mi] = self.ts[i]
         self._dts[grid.id] = dts
         self._ts[grid.id] = ts
         return mask
@@ -855,6 +859,22 @@
         for field in temp_data.keys():
             self[field] = temp_data[field]
 
+    def _get_pw(self, fields, center, width, origin, axes_unit, plot_type):
+        axis = self.axis
+        if fields == None:
+            if self.fields == None:
+                raise SyntaxError("The fields keyword argument must be set")
+        else:
+            self.fields = ensure_list(fields)
+        from yt.visualization.plot_window import \
+            GetBoundsAndCenter, PWViewerMPL
+        from yt.visualization.fixed_resolution import FixedResolutionBuffer
+        (bounds, center) = GetBoundsAndCenter(axis, center, width, self.pf)
+        pw = PWViewerMPL(self, bounds, origin=origin, frb_generator=FixedResolutionBuffer, 
+                         plot_type=plot_type)
+        pw.set_axes_unit(axes_unit)
+        return pw
+
     def to_frb(self, width, resolution, center=None, height=None):
         r"""This function returns a FixedResolutionBuffer generated from this
         object.
@@ -916,26 +936,6 @@
         frb = FixedResolutionBuffer(self, bounds, resolution)
         return frb
 
-    def to_pw(self):
-        r"""Create a :class:`~yt.visualization.plot_window.PlotWindow` from this
-        object.
-
-        This is a bare-bones mechanism of creating a plot window from this
-        object, which can then be moved around, zoomed, and on and on.  All
-        behavior of the plot window is relegated to that routine.
-        """
-        axis = self.axis
-        center = self.get_field_parameter("center")
-        if center is None:
-            center = (self.pf.domain_right_edge
-                    + self.pf.domain_left_edge)/2.0
-        width = (1.0, 'unitary')
-        from yt.visualization.plot_window import \
-            PWViewerMPL, GetBoundsAndCenter
-        (bounds, center) = GetBoundsAndCenter(axis, center, width, self.pf)
-        pw = PWViewerMPL(self, bounds)
-        return pw
-
     def interpolate_discretize(self, LE, RE, field, side, log_spacing=True):
         """
         This returns a uniform grid of points between *LE* and *RE*,
@@ -1193,6 +1193,18 @@
     def hub_upload(self):
         self._mrep.upload()
 
+    def to_pw(self, fields=None, center='c', width=None, axes_unit=None, 
+               origin='center-window'):
+        r"""Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this
+        object.
+
+        This is a bare-bones mechanism for creating a plot window from this
+        object, which can then be panned, zoomed, and otherwise adjusted.  All
+        behavior of the plot window is delegated to that class.
+        """
+        pw = self._get_pw(fields, center, width, origin, axes_unit, 'Slice')
+        return pw
+
 class AMRCuttingPlaneBase(AMR2DData):
     _plane = None
     _top_node = "/CuttingPlanes"
@@ -1355,6 +1367,30 @@
         return "%s/c%s_L%s" % \
             (self._top_node, cen_name, L_name)
 
+    def to_pw(self, fields=None, center='c', width=None, axes_unit=None):
+        r"""Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this
+        object.
+
+        This is a bare-bones mechanism for creating a plot window from this
+        object, which can then be panned, zoomed, and otherwise adjusted.  All
+        behavior of the plot window is delegated to that class.
+        """
+        normal = self.normal
+        center = self.center
+        if fields is None:
+            if self.fields is None:
+                raise SyntaxError("The fields keyword argument must be set")
+        else:
+            self.fields = ensure_list(fields)
+        from yt.visualization.plot_window import \
+            GetOffAxisBoundsAndCenter, PWViewerMPL
+        from yt.visualization.fixed_resolution import ObliqueFixedResolutionBuffer
+        (bounds, center_rot) = GetOffAxisBoundsAndCenter(normal, center, width, self.pf)
+        pw = PWViewerMPL(self, bounds, origin='center-window', periodic=False, oblique=True,
+                         frb_generator=ObliqueFixedResolutionBuffer, plot_type='OffAxisSlice')
+        pw.set_axes_unit(axes_unit)
+        return pw
+
     def to_frb(self, width, resolution, height=None):
         r"""This function returns an ObliqueFixedResolutionBuffer generated
         from this object.
@@ -1762,6 +1798,18 @@
             convs[:] = 1.0
         return dls, convs
 
+    def to_pw(self, fields=None, center='c', width=None, axes_unit=None, 
+               origin='center-window'):
+        r"""Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this
+        object.
+
+        This is a bare-bones mechanism for creating a plot window from this
+        object, which can then be panned, zoomed, and otherwise adjusted.  All
+        behavior of the plot window is delegated to that class.
+        """
+        pw = self._get_pw(fields, center, width, origin, axes_unit, 'Projection')
+        return pw
+
     def get_data(self, fields = None):
         if fields is None: fields = ensure_list(self.fields)[:]
         else: fields = ensure_list(fields)
@@ -2254,6 +2302,18 @@
     def add_fields(self, fields, weight = "CellMassMsun"):
         pass
 
+    def to_pw(self, fields=None, center='c', width=None, axes_unit=None, 
+               origin='center-window'):
+        r"""Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this
+        object.
+
+        This is a bare-bones mechanism for creating a plot window from this
+        object, which can then be panned, zoomed, and otherwise adjusted.  All
+        behavior of the plot window is delegated to that class.
+        """
+        pw = self._get_pw(fields, center, width, origin, axes_unit, 'Projection')
+        return pw
+
     def _project_grid(self, grid, fields, zero_out):
         # We split this next bit into two sections to try to limit the IO load
         # on the system.  This way, we perserve grid state (@restore_grid_state
@@ -3445,10 +3505,7 @@
         for gi, g in enumerate(grids): self._grids[gi] = g
 
     def _is_fully_enclosed(self, grid):
-        r = np.abs(grid._corners - self.center)
-        r = np.minimum(r, np.abs(self.DW[None,:]-r))
-        corner_radius = np.sqrt((r**2.0).sum(axis=1))
-        return np.all(corner_radius <= self.radius)
+        return False
 
     @restore_grid_state # Pains me not to decorate with cache_mask here
     def _get_cut_mask(self, grid, field=None):
@@ -3474,17 +3531,45 @@
                  pf=None, **kwargs):
         """
         By providing a *center*,*A*,*B*,*C*,*e0*,*tilt* we
-        can define a ellipsoid of any proportion.  Only cells whose centers are
-        within the ellipsoid will be selected.
+        can define an ellipsoid of any proportion.  Only cells whose
+        centers are within the ellipsoid will be selected.
+
+        Parameters
+        ----------
+        center : array_like
+            The center of the ellipsoid.
+        A : float
+            The magnitude of the largest semi-major axis of the ellipsoid.
+        B : float
+            The magnitude of the medium semi-major axis of the ellipsoid.
+        C : float
+            The magnitude of the smallest semi-major axis of the ellipsoid.
+        e0 : array_like (automatically normalized)
+            The direction of the largest semi-major axis of the ellipsoid.
+        tilt : float
+            After rotating about the z-axis to align e0 with x in the x-y
+            plane, and then rotating about the y-axis to bring e0 fully onto
+            the x-axis, tilt is the remaining angle in radians of the
+            rotation about the x-axis that aligns e1 with the y-axis and e2
+            with the z-axis.
+
+        Examples
+        --------
+        >>> pf = load("DD####/DD####")
+        >>> c = [0.5,0.5,0.5]
+        >>> ell = pf.h.ellipsoid(c, 0.1, 0.1, 0.1, np.array([0.1, 0.1, 0.1]), 0.2)
         """
+
         AMR3DData.__init__(self, np.array(center), fields, pf, **kwargs)
+        # make sure the magnitudes of semi-major axes are in order
+        if A < B or B < C:
+            raise YTEllipsoidOrdering(pf, A, B, C)
         # make sure the smallest side is not smaller than dx
         if C < self.hierarchy.get_smallest_dx():
             raise YTSphereTooSmall(pf, C, self.hierarchy.get_smallest_dx())
         self._A = A
         self._B = B
         self._C = C
-        self._e0 = e0
+        self._e0 = e0 = e0 / (e0**2.0).sum()**0.5
         self._tilt = tilt
         
         # find the t1 angle needed to rotate about z axis to align e0 to x
@@ -3602,7 +3687,7 @@
 class AMRCoveringGridBase(AMR3DData):
     _spatial = True
     _type_name = "covering_grid"
-    _con_args = ('level', 'left_edge', 'right_edge', 'ActiveDimensions')
+    _con_args = ('level', 'left_edge', 'ActiveDimensions')
     def __init__(self, level, left_edge, dims, fields = None,
                  pf = None, num_ghost_zones = 0, use_pbar = True, **kwargs):
         """A 3D region with all data extracted to a single, specified
@@ -3629,8 +3714,9 @@
                            fields=fields, pf=pf, **kwargs)
         self.left_edge = np.array(left_edge)
         self.level = level
-        self.dds = self.pf.h.select_grids(self.level)[0].dds.copy()
-        self.ActiveDimensions = np.array(dims,dtype='int32')
+        rdx = self.pf.domain_dimensions*self.pf.refine_by**level
+        self.dds = self.pf.domain_width/rdx.astype("float64")
+        self.ActiveDimensions = np.array(dims, dtype='int32')
         self.right_edge = self.left_edge + self.ActiveDimensions*self.dds
         self._num_ghost_zones = num_ghost_zones
         self._use_pbar = use_pbar

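The to_pw() methods added above for slices and projections all funnel through the new _get_pw() helper, which computes plot bounds via GetBoundsAndCenter and constructs a PWViewerMPL with the appropriate FRB generator and plot type; the off-axis cutting-plane version builds its own oblique buffer instead.  A minimal usage sketch of the new path, using the same fake_random_pf test fixture as the new unit tests (the "Density" field and the width chosen here are illustrative):

    from yt.testing import fake_random_pf

    pf = fake_random_pf(64)
    slc = pf.h.slice(0, 0.5, ["Density"])
    # Routes through _get_pw(): bounds come from GetBoundsAndCenter and
    # are handed to PWViewerMPL with plot_type='Slice'.
    pw = slc.to_pw(fields="Density", center='c', width=(0.3, 'unitary'))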

diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -598,16 +598,16 @@
                     continue
             else:
                 nz_filter = None
-            mins.append(data[field][nz_filter].min())
-            maxs.append(data[field][nz_filter].max())
+            mins.append(np.nanmin(data[field][nz_filter]))
+            maxs.append(np.nanmax(data[field][nz_filter]))
         else:
             if this_filter.any():
                 if non_zero:
                     nz_filter = ((this_filter) &
                                  (data[field][this_filter] > 0.0))
                 else: nz_filter = this_filter
-                mins.append(data[field][nz_filter].min())
-                maxs.append(data[field][nz_filter].max())
+                mins.append(np.nanmin(data[field][nz_filter]))
+                maxs.append(np.nanmax(data[field][nz_filter]))
             else:
                 mins.append(1e90)
                 maxs.append(-1e90)

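Switching the Extrema reductions from ndarray.min/.max to np.nanmin/np.nanmax makes the derived quantity ignore NaNs (for example, the undefined RadialVelocity at the exact center, which test_derived_quantities.py below exercises).  The difference in one line:

    import numpy as np

    a = np.array([1.0, np.nan, 3.0])
    a.min(), a.max()            # (nan, nan): a single NaN poisons the reduction
    np.nanmin(a), np.nanmax(a)  # (1.0, 3.0): NaNs are skipped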

diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -160,7 +160,8 @@
             # required attrs
             pf = fake_parameter_file(lambda: 1)
             pf.current_redshift = pf.omega_lambda = pf.omega_matter = \
-                pf.hubble_constant = pf.cosmological_simulation = 0.0
+                pf.cosmological_simulation = 0.0
+            pf.hubble_constant = 0.7
             pf.domain_left_edge = np.zeros(3, 'float64')
             pf.domain_right_edge = np.ones(3, 'float64')
             pf.dimensionality = 3


diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/data_objects/image_array.py
--- /dev/null
+++ b/yt/data_objects/image_array.py
@@ -0,0 +1,271 @@
+"""
+ImageArray Class
+
+Authors: Samuel Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
+
+Homepage: http://yt-project.org/
+License:
+    Copyright (C) 2012 Samuel Skillman.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+  """
+
+import numpy as np
+import h5py as h5
+from yt.visualization.image_writer import write_bitmap, write_image
+
+class ImageArray(np.ndarray):
+    r"""A custom Numpy ndarray used for images.
+
+    This differs from ndarray in that you can optionally specify an
+    info dictionary which is used later in saving, and can be accessed with
+    ImageArray.info.
+
+    Parameters
+    ----------
+    input_array: array_like
+        A numpy ndarray, or list. 
+
+    Other Parameters
+    ----------------
+    info: dictionary
+        Contains information to be stored with image.
+
+    Returns
+    -------
+    obj: ImageArray object 
+
+    Raises
+    ------
+    None
+
+    See Also
+    --------
+    numpy.ndarray : Inherits
+
+    Notes
+    -----
+
+    References
+    ----------
+
+    Examples
+    --------
+    These are written in doctest format, and should illustrate how to
+    use the function.  Use the variables 'pf' for the parameter file, 'pc' for
+    a plot collection, 'c' for a center, and 'L' for a vector. 
+
+    >>> im = np.zeros([64,128,3])
+    >>> for i in xrange(im.shape[0]):
+    ...     for k in xrange(im.shape[2]):
+    ...         im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
+
+    >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]),
+    ...     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),
+    ...     'width':0.245, 'units':'cm', 'type':'rendering'}
+
+    >>> im_arr = ImageArray(im, info=myinfo)
+    >>> im_arr.save('test_ImageArray')
+
+    Numpy ndarray documentation appended:
+
+    """
+    def __new__(cls, input_array, info=None):
+        # Input array is an already formed ndarray instance
+        # We first cast to be our class type
+        obj = np.asarray(input_array).view(cls)
+        # add the new attribute to the created instance
+        if info is None:
+            info = {}
+        obj.info = info
+        # Finally, we must return the newly created object:
+        return obj
+
+    def __array_finalize__(self, obj):
+        # see InfoArray.__array_finalize__ for comments
+        if obj is None: return
+        self.info = getattr(obj, 'info', None)
+
+    def write_hdf5(self, filename):
+        r"""Writes ImageArray to hdf5 file.
+
+        Parameters
+        ----------
+        filename: string
+            The filename to write to; it will not be modified.
+       
+        Examples
+        -------- 
+        >>> im = np.zeros([64,128,3])
+        >>> for i in xrange(im.shape[0]):
+        ...     for k in xrange(im.shape[2]):
+        ...         im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
+
+        >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]),
+        ...     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),
+        ...     'width':0.245, 'units':'cm', 'type':'rendering'}
+
+        >>> im_arr = ImageArray(im, info=myinfo)
+        >>> im_arr.write_hdf5('test_ImageArray.h5')
+
+        """
+        array_name = self.info.get("name","image")
+
+        f = h5.File(filename)
+        if array_name in f.keys():
+            del f[array_name]
+        d = f.create_dataset(array_name, data=self)
+        for k, v in self.info.iteritems():
+            d.attrs.create(k, v)
+        f.close()
+
+    def write_png(self, filename, clip_ratio=None):
+        r"""Writes ImageArray to png file.
+
+        Parameters
+        ----------
+        filename: string
+            The filename to write to.  A '.png' extension is appended if missing.
+       
+        Examples
+        --------
+        
+        >>> im = np.zeros([64,128,3])
+        >>> for i in xrange(im.shape[0]):
+        ...     for k in xrange(im.shape[2]):
+        ...         im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
+
+        >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]),
+        ...     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),
+        ...     'width':0.245, 'units':'cm', 'type':'rendering'}
+
+        >>> im_arr = ImageArray(im, info=myinfo)
+        >>> im_arr.write_png('test_ImageArray.png')
+
+        """
+        if filename[-4:] != '.png': 
+            filename += '.png'
+
+        if clip_ratio is not None:
+            return write_bitmap(self.swapaxes(0, 1), filename,
+                                clip_ratio * self.std())
+        else:
+            return write_bitmap(self.swapaxes(0, 1), filename)
+
+    def write_image(self, filename, color_bounds=None, channel=None, cmap_name="algae", func=lambda x: x):
+        r"""Writes a single channel of the ImageArray to a png file.
+
+        Parameters
+        ----------
+        filename: string
+            The filename to write to.  A '.png' extension is appended if missing.
+       
+        Other Parameters
+        ----------------
+        channel : int, optional
+            Which channel to write out as an image.  Defaults to 0.
+        color_bounds : tuple of floats, optional
+            The min and max to scale between.  Outlying values will be clipped.
+        cmap_name : string, optional
+            An acceptable colormap.  See either yt.visualization.color_maps or
+            http://www.scipy.org/Cookbook/Matplotlib/Show_colormaps .
+        func : function, optional
+            A function to transform the buffer before applying a colormap. 
+
+        Returns
+        -------
+        scaled_image : uint8 image that has been saved
+        
+        Examples
+        --------
+        
+        >>> im = np.zeros([64,128])
+        >>> for i in xrange(im.shape[0]):
+        ...     im[i,:] = np.linspace(0., 0.3, im.shape[1])
+
+        >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]),
+        ...     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),
+        ...     'width':0.245, 'units':'cm', 'type':'rendering'}
+
+        >>> im_arr = ImageArray(im, info=myinfo)
+        >>> im_arr.write_image('test_ImageArray.png')
+
+        """
+        if filename[-4:] != '.png': 
+            filename += '.png'
+
+        if channel is None:
+            return write_image(self.swapaxes(0,1), filename, 
+                               color_bounds=color_bounds, cmap_name=cmap_name, 
+                               func=func)
+        else:
+            return write_image(self.swapaxes(0,1)[:,:,channel], filename, 
+                               color_bounds=color_bounds, cmap_name=cmap_name, 
+                               func=func)
+
+    def save(self, filename, png=True, hdf5=True):
+        """
+        Saves the ImageArray.
+
+        Parameters
+        ----------
+        filename: string
+            This should not contain the extension type (.png, .h5, ...)
+
+        Other Parameters
+        ----------------
+        png: boolean, default True
+            Save to a png
+        hdf5: boolean, default True
+            Save to hdf5 file, including info dictionary as attributes.
+
+        """
+        if png:
+            if len(self.shape) > 2:
+                self.write_png("%s.png" % filename)
+            else:
+                self.write_image("%s.png" % filename)
+        if hdf5:
+            self.write_hdf5("%s.h5" % filename)
+
+    __doc__ += np.ndarray.__doc__
+
+if __name__ == "__main__":
+    im = np.zeros([64,128,3])
+    for i in xrange(im.shape[0]):
+        for k in xrange(im.shape[2]):
+            im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
+
+    myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
+        'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
+        'width':0.245, 'units':'cm', 'type':'rendering'}
+
+    im_arr = ImageArray(im, info=myinfo)
+    im_arr.save('test_3d_ImageArray')
+
+    im = np.zeros([64,128])
+    for i in xrange(im.shape[0]):
+        im[i,:] = np.linspace(0., 0.3, im.shape[1])
+
+    myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
+        'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
+        'width':0.245, 'units':'cm', 'type':'rendering'}
+
+    im_arr = ImageArray(im, info=myinfo)
+    im_arr.save('test_2d_ImageArray')
+

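ImageArray follows NumPy's standard view-casting subclass recipe: __new__ attaches the info dict when the object is constructed explicitly, and __array_finalize__ propagates it when the array is created implicitly through views, slices, or templates.  A minimal standalone sketch of the same pattern (not yt code):

    import numpy as np

    class InfoArray(np.ndarray):
        def __new__(cls, input_array, info=None):
            # Cast the input to our subclass and attach metadata.
            obj = np.asarray(input_array).view(cls)
            obj.info = info if info is not None else {}
            return obj

        def __array_finalize__(self, obj):
            # Called for views and slices; without this, arr[1:3].info
            # would not exist.
            if obj is None:
                return
            self.info = getattr(obj, 'info', None)

    a = InfoArray(np.zeros((4, 4)), info={'field': 'Density'})
    assert a[1:3].info == {'field': 'Density'}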

diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/data_objects/tests/test_boolean_regions.py
--- /dev/null
+++ b/yt/data_objects/tests/test_boolean_regions.py
@@ -0,0 +1,353 @@
+from yt.testing import *
+from yt.data_objects.api import add_field
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+    def _ID(field, data):
+        width = data.pf.domain_right_edge - data.pf.domain_left_edge
+        min_dx = 1.0/8192
+        delta = width / min_dx
+        x = data['x'] - min_dx / 2.
+        y = data['y'] - min_dx / 2.
+        z = data['z'] - min_dx / 2.
+        xi = x / min_dx
+        yi = y / min_dx
+        zi = z / min_dx
+        index = xi + delta[0] * (yi + delta[1] * zi)
+        index = index.astype('int64')
+        return index
+
+    add_field("ID", function=_ID)
+
+def test_boolean_spheres_no_overlap():
+    r"""Test to make sure that boolean objects (spheres, no overlap)
+    behave the way we expect.
+
+    Test non-overlapping spheres. This also checks that the original spheres
+    don't change as part of constructing the booleans.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        sp1 = pf.h.sphere([0.25, 0.25, 0.25], 0.15)
+        sp2 = pf.h.sphere([0.75, 0.75, 0.75], 0.15)
+        # Store the original indices
+        i1 = sp1['ID']
+        i1.sort()
+        i2 = sp2['ID']
+        i2.sort()
+        ii = np.concatenate((i1, i2))
+        ii.sort()
+        # Make some booleans
+        bo1 = pf.h.boolean([sp1, "AND", sp2]) # empty
+        bo2 = pf.h.boolean([sp1, "NOT", sp2]) # only sp1
+        bo3 = pf.h.boolean([sp1, "OR", sp2]) # combination
+        # This makes sure the original containers didn't change.
+        new_i1 = sp1['ID']
+        new_i1.sort()
+        new_i2 = sp2['ID']
+        new_i2.sort()
+        yield assert_array_equal, new_i1, i1
+        yield assert_array_equal, new_i2, i2
+        # Now make sure the indices also behave as we expect.
+        empty = np.array([])
+        yield assert_array_equal, bo1['ID'], empty
+        b2 = bo2['ID']
+        b2.sort()
+        yield assert_array_equal, b2, i1
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b3, ii
+ 
+def test_boolean_spheres_overlap():
+    r"""Test to make sure that boolean objects (spheres, overlap)
+    behave the way we expect.
+
+    Test overlapping spheres.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        sp1 = pf.h.sphere([0.45, 0.45, 0.45], 0.15)
+        sp2 = pf.h.sphere([0.55, 0.55, 0.55], 0.15)
+        # Get indices of both.
+        i1 = sp1['ID']
+        i2 = sp2['ID']
+        # Make some booleans
+        bo1 = pf.h.boolean([sp1, "AND", sp2]) # overlap (a lens)
+        bo2 = pf.h.boolean([sp1, "NOT", sp2]) # sp1 - sp2 (sphere with bite)
+        bo3 = pf.h.boolean([sp1, "OR", sp2]) # combination (H2)
+        # Now make sure the indices also behave as we expect.
+        lens = np.intersect1d(i1, i2)
+        apple = np.setdiff1d(i1, i2)
+        both = np.union1d(i1, i2)
+        b1 = bo1['ID']
+        b1.sort()
+        b2 = bo2['ID']
+        b2.sort()
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b1, lens
+        yield assert_array_equal, b2, apple
+        yield assert_array_equal, b3, both
+
+def test_boolean_regions_no_overlap():
+    r"""Test to make sure that boolean objects (regions, no overlap)
+    behave the way we expect.
+
+    Test non-overlapping regions. This also checks that the original regions
+    don't change as part of constructing the booleans.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        re1 = pf.h.region([0.25]*3, [0.2]*3, [0.3]*3)
+        re2 = pf.h.region([0.65]*3, [0.6]*3, [0.7]*3)
+        # Store the original indices
+        i1 = re1['ID']
+        i1.sort()
+        i2 = re2['ID']
+        i2.sort()
+        ii = np.concatenate((i1, i2))
+        ii.sort()
+        # Make some booleans
+        bo1 = pf.h.boolean([re1, "AND", re2]) # empty
+        bo2 = pf.h.boolean([re1, "NOT", re2]) # only re1
+        bo3 = pf.h.boolean([re1, "OR", re2]) # combination
+        # This makes sure the original containers didn't change.
+        new_i1 = re1['ID']
+        new_i1.sort()
+        new_i2 = re2['ID']
+        new_i2.sort()
+        yield assert_array_equal, new_i1, i1
+        yield assert_array_equal, new_i2, i2
+        # Now make sure the indices also behave as we expect.
+        empty = np.array([])
+        yield assert_array_equal, bo1['ID'], empty
+        b2 = bo2['ID']
+        b2.sort()
+        yield assert_array_equal, b2, i1 
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b3, ii
+
+def test_boolean_regions_overlap():
+    r"""Test to make sure that boolean objects (regions, overlap)
+    behave the way we expect.
+
+    Test overlapping regions.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        re1 = pf.h.region([0.55]*3, [0.5]*3, [0.6]*3)
+        re2 = pf.h.region([0.6]*3, [0.55]*3, [0.65]*3)
+        # Get indices of both.
+        i1 = re1['ID']
+        i2 = re2['ID']
+        # Make some booleans
+        bo1 = pf.h.boolean([re1, "AND", re2]) # overlap (small cube)
+        bo2 = pf.h.boolean([re1, "NOT", re2]) # re1 - re2 (large cube with bite)
+        bo3 = pf.h.boolean([re1, "OR", re2]) # combination (merged large cubes)
+        # Now make sure the indices also behave as we expect.
+        cube = np.intersect1d(i1, i2)
+        bite_cube = np.setdiff1d(i1, i2)
+        both = np.union1d(i1, i2)
+        b1 = bo1['ID']
+        b1.sort()
+        b2 = bo2['ID']
+        b2.sort()
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b1, cube
+        yield assert_array_equal, b2, bite_cube
+        yield assert_array_equal, b3, both
+
+def test_boolean_cylinders_no_overlap():
+    r"""Test to make sure that boolean objects (cylinders, no overlap)
+    behave the way we expect.
+
+    Test non-overlapping cylinders. This also checks that the original cylinders
+    don't change as part of constructing the booleans.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        cyl1 = pf.h.disk([0.25]*3, [1, 0, 0], 0.1, 0.1)
+        cyl2 = pf.h.disk([0.75]*3, [1, 0, 0], 0.1, 0.1)
+        # Store the original indices
+        i1 = cyl1['ID']
+        i1.sort()
+        i2 = cyl2['ID']
+        i2.sort()
+        ii = np.concatenate((i1, i2))
+        ii.sort()
+        # Make some booleans
+        bo1 = pf.h.boolean([cyl1, "AND", cyl2]) # empty
+        bo2 = pf.h.boolean([cyl1, "NOT", cyl2]) # only cyl1
+        bo3 = pf.h.boolean([cyl1, "OR", cyl2]) # combination
+        # This makes sure the original containers didn't change.
+        new_i1 = cyl1['ID']
+        new_i1.sort()
+        new_i2 = cyl2['ID']
+        new_i2.sort()
+        yield assert_array_equal, new_i1, i1
+        yield assert_array_equal, new_i2, i2
+        # Now make sure the indices also behave as we expect.
+        empty = np.array([])
+        yield assert_array_equal, bo1['ID'], empty
+        b2 = bo2['ID']
+        b2.sort()
+        yield assert_array_equal, b2, i1
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b3, ii
+
+def test_boolean_cylinders_overlap():
+    r"""Test to make sure that boolean objects (cylinders, overlap)
+    behave the way we expect.
+
+    Test overlapping cylinders.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        cyl1 = pf.h.disk([0.45]*3, [1, 0, 0], 0.2, 0.2)
+        cyl2 = pf.h.disk([0.55]*3, [1, 0, 0], 0.2, 0.2)
+        # Get indices of both.
+        i1 = cyl1['ID']
+        i2 = cyl2['ID']
+        # Make some booleans
+        bo1 = pf.h.boolean([cyl1, "AND", cyl2]) # overlap (vertically extended lens)
+        bo2 = pf.h.boolean([cyl1, "NOT", cyl2]) # cyl1 - cyl2 (disk minus a bite)
+        bo3 = pf.h.boolean([cyl1, "OR", cyl2]) # combination (merged disks)
+        # Now make sure the indices also behave as we expect.
+        vlens = np.intersect1d(i1, i2)
+        bite_disk = np.setdiff1d(i1, i2)
+        both = np.union1d(i1, i2)
+        b1 = bo1['ID']
+        b1.sort()
+        b2 = bo2['ID']
+        b2.sort()
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b1, vlens
+        yield assert_array_equal, b2, bite_disk
+        yield assert_array_equal, b3, both
+
+def test_boolean_ellipsoids_no_overlap():
+    r"""Test to make sure that boolean objects (ellipsoids, no overlap)
+    behave the way we expect.
+
+    Test non-overlapping ellipsoids. This also checks that the original
+    ellipsoids don't change as part of constructing the booleans.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        ell1 = pf.h.ellipsoid([0.25]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
+            np.array([0.1]*3))
+        ell2 = pf.h.ellipsoid([0.75]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
+            np.array([0.1]*3))
+        # Store the original indices
+        i1 = ell1['ID']
+        i1.sort()
+        i2 = ell2['ID']
+        i2.sort()
+        ii = np.concatenate((i1, i2))
+        ii.sort()
+        # Make some booleans
+        bo1 = pf.h.boolean([ell1, "AND", ell2]) # empty
+        bo2 = pf.h.boolean([ell1, "NOT", ell2]) # only ell1
+        bo3 = pf.h.boolean([ell1, "OR", ell2]) # combination
+        # This makes sure the original containers didn't change.
+        new_i1 = ell1['ID']
+        new_i1.sort()
+        new_i2 = ell2['ID']
+        new_i2.sort()
+        yield assert_array_equal, new_i1, i1 
+        yield assert_array_equal, new_i2, i2
+        # Now make sure the indices also behave as we expect.
+        empty = np.array([])
+        yield assert_array_equal, bo1['ID'], empty
+        b2 = bo2['ID']
+        b2.sort()
+        yield assert_array_equal, b2, i1
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b3, ii
+
+def test_boolean_ellipsoids_overlap():
+    r"""Test to make sure that boolean objects (ellipsoids, overlap)
+    behave the way we expect.
+
+    Test overlapping ellipsoids.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        ell1 = pf.h.ellipsoid([0.45]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
+            np.array([0.1]*3))
+        ell2 = pf.h.ellipsoid([0.55]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
+            np.array([0.1]*3))
+        # Get indices of both.
+        i1 = ell1['ID']
+        i2 = ell2['ID']
+        # Make some booleans
+        bo1 = pf.h.boolean([ell1, "AND", ell2]) # overlap
+        bo2 = pf.h.boolean([ell1, "NOT", ell2]) # ell1 - ell2
+        bo3 = pf.h.boolean([ell1, "OR", ell2]) # combination
+        # Now make sure the indices also behave as we expect.
+        overlap = np.intersect1d(i1, i2)
+        diff = np.setdiff1d(i1, i2)
+        both = np.union1d(i1, i2)
+        b1 = bo1['ID']
+        b1.sort()
+        b2 = bo2['ID']
+        b2.sort()
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b1, overlap
+        yield assert_array_equal, b2, diff
+        yield assert_array_equal, b3, both
+
+def test_boolean_mix_periodicity():
+    r"""Test that a hybrid boolean region behaves as we expect.
+
+    This also tests nested logic and that periodicity works.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        re = pf.h.region([0.5]*3, [0.0]*3, [1]*3) # whole thing
+        sp = pf.h.sphere([0.95]*3, 0.3) # wraps around
+        cyl = pf.h.disk([0.05]*3, [1,1,1], 0.1, 0.4) # wraps around
+        # Get original indices
+        rei = re['ID']
+        spi = sp['ID']
+        cyli = cyl['ID']
+        # Make some booleans
+        # whole box minus spherical bites at corners
+        bo1 = pf.h.boolean([re, "NOT", sp])
+        # sphere plus cylinder
+        bo2 = pf.h.boolean([sp, "OR", cyl])
+        # a jumble, the region minus the sp+cyl
+        bo3 = pf.h.boolean([re, "NOT", "(", sp, "OR", cyl, ")"])
+        # Now make sure the indices also behave as we expect.
+        expect = np.setdiff1d(rei, spi)
+        ii = bo1['ID']
+        ii.sort()
+        yield assert_array_equal, expect, ii
+        #
+        expect = np.union1d(spi, cyli)
+        ii = bo2['ID']
+        ii.sort()
+        yield assert_array_equal, expect, ii
+        #
+        expect = np.union1d(spi, cyli)
+        expect = np.setdiff1d(rei, expect)
+        ii = bo3['ID']
+        ii.sort()
+        yield assert_array_equal, expect, ii
+

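Each boolean operator in these tests has an exact NumPy set-operation counterpart on the sorted cell IDs, which is what the assertions compare against:

    import numpy as np

    i1 = np.array([1, 2, 3, 4])
    i2 = np.array([3, 4, 5, 6])
    np.intersect1d(i1, i2)  # "AND" -> [3 4]
    np.setdiff1d(i1, i2)    # "NOT" -> [1 2]
    np.union1d(i1, i2)      # "OR"  -> [1 2 3 4 5 6]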

diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/data_objects/tests/test_covering_grid.py
--- /dev/null
+++ b/yt/data_objects/tests/test_covering_grid.py
@@ -0,0 +1,27 @@
+from yt.testing import *
+from yt.data_objects.profiles import \
+    BinnedProfile1D, BinnedProfile2D, BinnedProfile3D
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_covering_grid():
+    # We decompose in different ways
+    for level in [0, 1, 2]:
+        for nprocs in [1, 2, 4, 8]:
+            pf = fake_random_pf(16, nprocs = nprocs)
+            dn = pf.refine_by**level 
+            cg = pf.h.covering_grid(level, [0.0, 0.0, 0.0],
+                    dn * pf.domain_dimensions)
+            yield assert_equal, cg["Ones"].max(), 1.0
+            yield assert_equal, cg["Ones"].min(), 1.0
+            yield assert_equal, cg["CellVolume"].sum(), pf.domain_width.prod()
+            for g in pf.h.grids:
+                di = g.get_global_startindex()
+                dd = g.ActiveDimensions
+                for i in range(dn):
+                    f = cg["Density"][dn*di[0]+i:dn*(di[0]+dd[0])+i:dn,
+                                      dn*di[1]+i:dn*(di[1]+dd[1])+i:dn,
+                                      dn*di[2]+i:dn*(di[2]+dd[2])+i:dn]
+                    yield assert_equal, f, g["Density"]

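The strided slice in test_covering_grid works because a covering grid refined by dn = refine_by**level holds dn**3 fine cells per original cell, so sampling every dn-th cell at each of the dn sub-offsets must reproduce the original grid.  A 1D analogue of the same check:

    import numpy as np

    coarse = np.arange(4.0)      # stand-in for g["Density"] along one axis
    fine = np.repeat(coarse, 2)  # covering grid one level finer (dn = 2)
    dn, di, dd = 2, 0, 4
    for i in range(dn):
        assert (fine[dn*di+i : dn*(di+dd)+i : dn] == coarse).all()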

diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/data_objects/tests/test_derived_quantities.py
--- /dev/null
+++ b/yt/data_objects/tests/test_derived_quantities.py
@@ -0,0 +1,24 @@
+from yt.testing import *
+import numpy as np
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_extrema():
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(16, nprocs = nprocs, fields = ("Density",
+                "x-velocity", "y-velocity", "z-velocity"))
+        sp = pf.h.sphere("c", (0.25, '1'))
+        (mi, ma), = sp.quantities["Extrema"]("Density")
+        yield assert_equal, mi, np.nanmin(sp["Density"])
+        yield assert_equal, ma, np.nanmax(sp["Density"])
+        dd = pf.h.all_data()
+        (mi, ma), = dd.quantities["Extrema"]("Density")
+        yield assert_equal, mi, np.nanmin(dd["Density"])
+        yield assert_equal, ma, np.nanmax(dd["Density"])
+        sp = pf.h.sphere("max", (0.25, '1'))
+        yield assert_equal, np.any(np.isnan(sp["RadialVelocity"])), True
+        (mi, ma), = dd.quantities["Extrema"]("RadialVelocity")
+        yield assert_equal, mi, np.nanmin(dd["RadialVelocity"])
+        yield assert_equal, ma, np.nanmax(dd["RadialVelocity"])


diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/data_objects/tests/test_ellipsoid.py
--- /dev/null
+++ b/yt/data_objects/tests/test_ellipsoid.py
@@ -0,0 +1,35 @@
+from yt.testing import *
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","loglevel"] = "50"
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_ellipsoid():
+    # We decompose in different ways
+    cs = [np.array([0.5, 0.5, 0.5]),
+          np.array([0.1, 0.2, 0.3]),
+          np.array([0.8, 0.8, 0.8])]
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs = nprocs)
+        min_dx = 2.0/pf.domain_dimensions
+        ABC = np.random.random((3, 12)) * 0.1
+        e0s = np.random.random((3, 12))
+        tilts = np.random.random(12)
+        ABC[:,0] = 0.1
+        for i in range(12):
+            for c in cs:
+                A, B, C = reversed(sorted(ABC[:,i]))
+                A = max(A, min_dx[0])
+                B = max(B, min_dx[1])
+                C = max(C, min_dx[2])
+                e0 = e0s[:,i]
+                tilt = tilts[i]
+                ell = pf.h.ellipsoid(c, A, B, C, e0, tilt)
+                yield assert_equal, np.all(ell["Radius"] <= A), True
+                p = np.array([ell[ax] for ax in 'xyz'])
+                v  = np.zeros_like(ell["Radius"])
+                v += (((p - c[:,None]) * ell._e0[:,None]).sum(axis=0) / ell._A)**2
+                v += (((p - c[:,None]) * ell._e1[:,None]).sum(axis=0) / ell._B)**2
+                v += (((p - c[:,None]) * ell._e2[:,None]).sum(axis=0) / ell._C)**2
+                yield assert_equal, np.all(np.sqrt(v) <= 1.0), True

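The assertion at the end of test_ellipsoid is the standard ellipsoid membership test: a point r relative to the center lies inside when ((r.e0)/A)**2 + ((r.e1)/B)**2 + ((r.e2)/C)**2 <= 1.  The same check the test performs, factored into a helper (a sketch, with p shaped (3, N) as in the test):

    import numpy as np

    def in_ellipsoid(p, c, e0, e1, e2, A, B, C):
        r = p - c[:, None]
        v  = ((r * e0[:, None]).sum(axis=0) / A)**2
        v += ((r * e1[:, None]).sum(axis=0) / B)**2
        v += ((r * e2[:, None]).sum(axis=0) / C)**2
        return v <= 1.0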

diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/data_objects/tests/test_extract_regions.py
--- /dev/null
+++ b/yt/data_objects/tests/test_extract_regions.py
@@ -0,0 +1,53 @@
+from yt.testing import *
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_cut_region():
+    # We decompose in different ways
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs = nprocs,
+            fields = ("Density", "Temperature", "x-velocity"))
+        # We'll test two objects
+        dd = pf.h.all_data()
+        r = dd.cut_region( [ "grid['Temperature'] > 0.5",
+                             "grid['Density'] < 0.75",
+                             "grid['x-velocity'] > 0.25" ])
+        t = ( (dd["Temperature"] > 0.5 ) 
+            & (dd["Density"] < 0.75 )
+            & (dd["x-velocity"] > 0.25 ) )
+        yield assert_equal, np.all(r["Temperature"] > 0.5), True
+        yield assert_equal, np.all(r["Density"] < 0.75), True
+        yield assert_equal, np.all(r["x-velocity"] > 0.25), True
+        yield assert_equal, np.sort(dd["Density"][t]), np.sort(r["Density"])
+        yield assert_equal, np.sort(dd["x"][t]), np.sort(r["x"])
+        r2 = r.cut_region( [ "grid['Temperature'] < 0.75" ] )
+        t2 = (r["Temperature"] < 0.75)
+        yield assert_equal, np.sort(r2["Temperature"]), np.sort(r["Temperature"][t2])
+        yield assert_equal, np.all(r2["Temperature"] < 0.75), True
+
+def test_extract_region():
+    # We decompose in different ways
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs = nprocs,
+            fields = ("Density", "Temperature", "x-velocity"))
+        # We'll test two objects
+        dd = pf.h.all_data()
+        t = ( (dd["Temperature"] > 0.5 ) 
+            & (dd["Density"] < 0.75 )
+            & (dd["x-velocity"] > 0.25 ) )
+        r = dd.extract_region(t)
+        yield assert_equal, np.all(r["Temperature"] > 0.5), True
+        yield assert_equal, np.all(r["Density"] < 0.75), True
+        yield assert_equal, np.all(r["x-velocity"] > 0.25), True
+        yield assert_equal, np.sort(dd["Density"][t]), np.sort(r["Density"])
+        yield assert_equal, np.sort(dd["x"][t]), np.sort(r["x"])
+        t2 = (r["Temperature"] < 0.75)
+        r2 = r.cut_region( [ "grid['Temperature'] < 0.75" ] )
+        yield assert_equal, np.sort(r2["Temperature"]), np.sort(r["Temperature"][t2])
+        yield assert_equal, np.all(r2["Temperature"] < 0.75), True
+        t3 = (r["Temperature"] < 0.75)
+        r3 = r.extract_region( t3 )
+        yield assert_equal, np.sort(r3["Temperature"]), np.sort(r["Temperature"][t3])
+        yield assert_equal, np.all(r3["Temperature"] < 0.75), True

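These tests exercise the two selection styles side by side: cut_region takes strings that are evaluated per grid with `grid` in scope, while extract_region takes a precomputed boolean mask over the object's cells.  A sketch of the two, using the same fixture as above; both are expected to select the same cells:

    from yt.testing import fake_random_pf

    pf = fake_random_pf(64, fields=("Density", "Temperature", "x-velocity"))
    dd = pf.h.all_data()
    r1 = dd.cut_region(["grid['Temperature'] > 0.5"])  # strings, per grid
    t = dd["Temperature"] > 0.5
    r2 = dd.extract_region(t)                          # explicit boolean mask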

diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/data_objects/tests/test_fields.py
--- /dev/null
+++ b/yt/data_objects/tests/test_fields.py
@@ -0,0 +1,91 @@
+from yt.testing import *
+import numpy as np
+from yt.data_objects.field_info_container import \
+    FieldInfo
+import yt.data_objects.universal_fields
+from yt.utilities.definitions import \
+    mpc_conversion, sec_conversion
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+    np.seterr(all = 'ignore')
+
+_sample_parameters = dict(
+    axis = 0,
+    center = np.array((0.0, 0.0, 0.0)),
+    bulk_velocity = np.array((0.0, 0.0, 0.0)),
+    normal = np.array((0.0, 0.0, 1.0)),
+    cp_x_vec = np.array((1.0, 0.0, 0.0)),
+    cp_y_vec = np.array((0.0, 1.0, 0.0)),
+    cp_z_vec = np.array((0.0, 0.0, 1.0)),
+)
+
+_base_fields = ["Density", "x-velocity", "y-velocity", "z-velocity"]
+
+def realistic_pf(fields, nprocs):
+    pf = fake_random_pf(16, fields = fields, nprocs = nprocs)
+    pf.parameters["HydroMethod"] = "streaming"
+    pf.parameters["Gamma"] = 5.0/3.0
+    pf.parameters["EOSType"] = 1.0
+    pf.parameters["EOSSoundSpeed"] = 1.0
+    pf.conversion_factors["Time"] = 1.0
+    pf.conversion_factors.update( dict((f, 1.0) for f in fields) )
+    pf.current_redshift = 0.0001
+    pf.hubble_constant = 0.7
+    for unit in mpc_conversion:
+        pf.units[unit+'h'] = pf.units[unit]
+        pf.units[unit+'cm'] = pf.units[unit]
+        pf.units[unit+'hcm'] = pf.units[unit]
+    return pf
+
+class TestFieldAccess(object):
+    description = None
+
+    def __init__(self, field_name, nproc):
+        # Note this should be a field name
+        self.field_name = field_name
+        self.description = "Accessing_%s_%s" % (field_name, nproc)
+        self.nproc = nproc
+
+    def __call__(self):
+        field = FieldInfo[self.field_name]
+        deps = field.get_dependencies()
+        fields = deps.requested + _base_fields
+        skip_grids = False
+        needs_spatial = False
+        for v in field.validators:
+            f = getattr(v, "fields", None)
+            if f: fields += f
+            if getattr(v, "ghost_zones", 0) > 0:
+                skip_grids = True
+            if hasattr(v, "ghost_zones"):
+                needs_spatial = True
+        pf = realistic_pf(fields, self.nproc)
+        # This gives unequal sized grids as well as subgrids
+        dd1 = pf.h.all_data()
+        dd2 = pf.h.all_data()
+        dd1.field_parameters.update(_sample_parameters)
+        dd2.field_parameters.update(_sample_parameters)
+        v1 = dd1[self.field_name]
+        conv = field._convert_function(dd1) or 1.0
+        if not needs_spatial:
+            assert_equal(v1, conv*field._function(field, dd2))
+        if not skip_grids:
+            for g in pf.h.grids:
+                g.field_parameters.update(_sample_parameters)
+                conv = field._convert_function(g) or 1.0
+                v1 = g[self.field_name]
+                g.clear_data()
+                g.field_parameters.update(_sample_parameters)
+                assert_equal(v1, conv*field._function(field, g))
+
+def test_all_fields():
+    for field in FieldInfo:
+        if field.startswith("CuttingPlane"): continue
+        if field.startswith("particle"): continue
+        if field.startswith("CIC"): continue
+        if field.startswith("WeakLensingConvergence"): continue
+        if FieldInfo[field].particle_type: continue
+        for nproc in [1, 4, 8]:
+            yield TestFieldAccess(field, nproc)

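TestFieldAccess relies on nose's generator-test protocol: each yielded callable runs as its own test, and the `description` attribute supplies the name nose reports.  A stripped-down sketch of the same pattern:

    class CheckPositive(object):
        def __init__(self, name, value):
            self.description = "Checking_%s" % name
            self.value = value
        def __call__(self):
            assert self.value > 0

    def test_many():
        for name, value in [("a", 1), ("b", 2)]:
            yield CheckPositive(name, value)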

diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/data_objects/tests/test_ortho_rays.py
--- /dev/null
+++ b/yt/data_objects/tests/test_ortho_rays.py
@@ -0,0 +1,25 @@
+from yt.testing import *
+
+def test_ortho_ray():
+    pf = fake_random_pf(64, nprocs=8)
+    dx = (pf.domain_right_edge - pf.domain_left_edge) / \
+          pf.domain_dimensions
+
+    axes = ['x', 'y', 'z']
+    for ax, an in enumerate(axes):
+        ocoord = np.random.random(2)
+
+        my_oray = pf.h.ortho_ray(ax, ocoord)
+
+        my_axes = range(3)
+        del my_axes[ax]
+
+        # find the cells intersected by the ortho ray
+        my_all = pf.h.all_data()
+        my_cells = (np.abs(my_all[axes[my_axes[0]]] - ocoord[0]) <= 
+                    0.5 * dx[my_axes[0]]) & \
+                   (np.abs(my_all[axes[my_axes[1]]] - ocoord[1]) <= 
+                    0.5 * dx[my_axes[1]])
+
+        yield assert_equal, my_oray['Density'].sum(), \
+                            my_all['Density'][my_cells].sum()

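The mask in test_ortho_ray encodes the intersection criterion for an axis-aligned ray: a cell of width dx centered at xc is pierced exactly when |xc - x0| <= dx/2 in each of the two transverse axes.  A 1D check of that criterion:

    import numpy as np

    dx = 1.0 / 64
    xc = (np.arange(64) + 0.5) * dx    # cell centers along one axis
    x0 = 0.37                          # an interior ray coordinate
    hits = np.abs(xc - x0) <= 0.5 * dx
    assert hits.sum() == 1             # exactly one cell per axis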

diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/data_objects/tests/test_profiles.py
--- /dev/null
+++ b/yt/data_objects/tests/test_profiles.py
@@ -0,0 +1,74 @@
+from yt.testing import *
+from yt.data_objects.profiles import \
+    BinnedProfile1D, BinnedProfile2D, BinnedProfile3D
+
+_fields = ("Density", "Temperature", "Dinosaurs", "Tribbles")
+
+def test_profiles():
+    pf = fake_random_pf(64, nprocs = 8, fields = _fields)
+    nv = pf.domain_dimensions.prod()
+    dd = pf.h.all_data()
+    (rmi, rma), (tmi, tma), (dmi, dma) = dd.quantities["Extrema"](
+        ["Density", "Temperature", "Dinosaurs"])
+    rt, tt, dt = dd.quantities["TotalQuantity"](
+        ["Density", "Temperature", "Dinosaurs"])
+    # First we look at the binned profiles over a range of bin counts.
+    for nb in [8, 16, 32, 64]:
+        for lr in [True, False]:
+            # We log all the fields or don't log 'em all.  No need to do them
+            # individually.
+            for lf in [True, False]: 
+                # We have the min and the max, but to avoid cutting them off
+                # since we aren't doing end-collect, we cut a bit off the edges
+                for ec, e1, e2 in [(False, 0.9, 1.1), (True, 1.0, 1.0)]:
+                    p1d = BinnedProfile1D(dd, 
+                        nb, "Density", rmi*e1, rma*e2, lf,
+                        lr, end_collect=ec)
+                    p1d.add_fields(["Ones", "Temperature"], weight=None)
+                    yield assert_equal, p1d["Ones"].sum(), nv
+                    yield assert_rel_equal, tt, p1d["Temperature"].sum(), 7
+
+                    p2d = BinnedProfile2D(dd, 
+                        nb, "Density", rmi*e1, rma*e2, lf,
+                        nb, "Temperature", tmi*e1, tma*e2, lf,
+                        lr, end_collect=ec)
+                    p2d.add_fields(["Ones", "Temperature"], weight=None)
+                    yield assert_equal, p2d["Ones"].sum(), nv
+                    yield assert_rel_equal, tt, p2d["Temperature"].sum(), 7
+
+                    p3d = BinnedProfile3D(dd, 
+                        nb, "Density", rmi*e1, rma*e2, lf,
+                        nb, "Temperature", tmi*e1, tma*e2, lf,
+                        nb, "Dinosaurs", dmi*e1, dma*e2, lf,
+                        lr, end_collect=ec)
+                    p3d.add_fields(["Ones", "Temperature"], weight=None)
+                    yield assert_equal, p3d["Ones"].sum(), nv
+                    yield assert_rel_equal, tt, p3d["Temperature"].sum(), 7
+
+            p1d = BinnedProfile1D(dd, nb, "x", 0.0, 1.0, log_space=False)
+            p1d.add_fields("Ones", weight=None)
+            av = nv / nb
+            yield assert_equal, p1d["Ones"][:-1], np.ones(nb)*av
+            # We re-bin ones with a weight now
+            p1d.add_fields(["Ones"], weight="Temperature")
+            yield assert_equal, p1d["Ones"][:-1], np.ones(nb)
+
+            p2d = BinnedProfile2D(dd, nb, "x", 0.0, 1.0, False,
+                                      nb, "y", 0.0, 1.0, False)
+            p2d.add_fields("Ones", weight=None)
+            av = nv / nb**2
+            yield assert_equal, p2d["Ones"][:-1,:-1], np.ones((nb, nb))*av
+            # We re-bin ones with a weight now
+            p2d.add_fields(["Ones"], weight="Temperature")
+            yield assert_equal, p2d["Ones"][:-1,:-1], np.ones((nb, nb))
+
+            p3d = BinnedProfile3D(dd, nb, "x", 0.0, 1.0, False,
+                                      nb, "y", 0.0, 1.0, False,
+                                      nb, "z", 0.0, 1.0, False)
+            p3d.add_fields("Ones", weight=None)
+            av = nv / nb**3
+            yield assert_equal, p3d["Ones"][:-1,:-1,:-1], np.ones((nb, nb, nb))*av
+            # We re-bin ones with a weight now
+            p3d.add_fields(["Ones"], weight="Temperature")
+            yield assert_equal, p3d["Ones"][:-1,:-1,:-1], np.ones((nb,nb,nb))
+

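The expected per-bin count av = nv/nb in test_profiles follows from binning a uniform coordinate: 64**3 cells spread evenly over nb equal x-bins put exactly nv/nb cells in each.  A quick numerical confirmation of that arithmetic:

    import numpy as np

    nv, nb = 64**3, 16
    x = (np.arange(nv) % 64 + 0.5) / 64        # uniform cell x-coordinates
    counts, _ = np.histogram(x, bins=nb, range=(0.0, 1.0))
    assert (counts == nv // nb).all()          # 262144 / 16 = 16384 per bin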

diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/data_objects/tests/test_projection.py
--- /dev/null
+++ b/yt/data_objects/tests/test_projection.py
@@ -0,0 +1,59 @@
+from yt.testing import *
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_projection():
+    for nprocs in [8, 1]:
+        # We want to test both 1 proc and 8 procs, to make sure that
+        # parallelism isn't broken
+        pf = fake_random_pf(64, nprocs = nprocs)
+        dims = pf.domain_dimensions
+        xn, yn, zn = pf.domain_dimensions
+        xi, yi, zi = pf.domain_left_edge + 1.0/(pf.domain_dimensions * 2)
+        xf, yf, zf = pf.domain_right_edge - 1.0/(pf.domain_dimensions * 2)
+        dd = pf.h.all_data()
+        rho_tot = dd.quantities["TotalQuantity"]("Density")[0]
+        coords = np.mgrid[xi:xf:xn*1j, yi:yf:yn*1j, zi:zf:zn*1j]
+        uc = [np.unique(c) for c in coords]
+        # Some simple projection tests with single grids
+        for ax, an in enumerate("xyz"):
+            xax = x_dict[ax]
+            yax = y_dict[ax]
+            for wf in ["Density", None]:
+                proj = pf.h.proj(ax, ["Ones", "Density"], weight_field = wf)
+                yield assert_equal, proj["Ones"].sum(), proj["Ones"].size
+                yield assert_equal, proj["Ones"].min(), 1.0
+                yield assert_equal, proj["Ones"].max(), 1.0
+                yield assert_equal, np.unique(proj["px"]), uc[xax]
+                yield assert_equal, np.unique(proj["py"]), uc[yax]
+                yield assert_equal, np.unique(proj["pdx"]), 1.0/(dims[xax]*2.0)
+                yield assert_equal, np.unique(proj["pdy"]), 1.0/(dims[yax]*2.0)
+                frb = proj.to_frb((1.0,'unitary'), 64)
+                for proj_field in ['Ones', 'Density']:
+                    yield assert_equal, frb[proj_field].info['data_source'], \
+                            proj.__str__()
+                    yield assert_equal, frb[proj_field].info['axis'], \
+                            ax
+                    yield assert_equal, frb[proj_field].info['field'], \
+                            proj_field
+                    yield assert_equal, frb[proj_field].info['units'], \
+                            pf.field_info[proj_field].get_units()
+                    yield assert_equal, frb[proj_field].info['xlim'], \
+                            frb.bounds[:2]
+                    yield assert_equal, frb[proj_field].info['ylim'], \
+                            frb.bounds[2:]
+                    yield assert_equal, frb[proj_field].info['length_to_cm'], \
+                            pf['cm']
+                    yield assert_equal, frb[proj_field].info['center'], \
+                            proj.center
+                    yield assert_equal, frb[proj_field].info['weight_field'], \
+                            wf
+            # wf == None
+            yield assert_equal, wf, None
+            v1 = proj["Density"].sum()
+            v2 = (dd["Density"] * dd["d%s" % an]).sum()
+            yield assert_rel_equal, v1, v2, 10
+
+

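The final comparison in test_projection is a conservation statement: summing an unweighted projection over its pixels equals summing rho*dl over every cell, since each cell contributes to exactly one pixel along its line of sight.  A 1D analogue of the identity:

    import numpy as np

    rho = np.random.random((8, 8))   # one column of cells per pixel
    dl = 1.0 / 8
    proj = (rho * dl).sum(axis=0)    # integrate along the line of sight
    assert np.allclose(proj.sum(), (rho * dl).sum())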

diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/data_objects/tests/test_rays.py
--- /dev/null
+++ b/yt/data_objects/tests/test_rays.py
@@ -0,0 +1,33 @@
+from yt.testing import *
+
+def test_ray():
+    for nproc in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=nproc)
+        dx = (pf.domain_right_edge - pf.domain_left_edge) / \
+          pf.domain_dimensions
+
+        p1 = np.random.random(3)
+        p2 = np.random.random(3)
+
+        my_ray = pf.h.ray(p1, p2)
+        assert_rel_equal(my_ray['dts'].sum(), 1.0, 14)
+        ray_cells = my_ray['dts'] > 0
+
+        # find cells intersected by the ray
+        my_all = pf.h.all_data()
+        
+        dt = np.abs(dx / (p2 - p1))
+        tin  = np.concatenate([[(my_all['x'] - p1[0]) / (p2 - p1)[0] - 0.5 * dt[0]],
+                               [(my_all['y'] - p1[1]) / (p2 - p1)[1] - 0.5 * dt[1]],
+                               [(my_all['z'] - p1[2]) / (p2 - p1)[2] - 0.5 * dt[2]]])
+        tout = np.concatenate([[(my_all['x'] - p1[0]) / (p2 - p1)[0] + 0.5 * dt[0]],
+                               [(my_all['y'] - p1[1]) / (p2 - p1)[1] + 0.5 * dt[1]],
+                               [(my_all['z'] - p1[2]) / (p2 - p1)[2] + 0.5 * dt[2]]])
+        tin = tin.max(axis=0)
+        tout = tout.min(axis=0)
+        my_cells = (tin < tout) & (tin < 1) & (tout > 0)
+
+        yield assert_rel_equal, ray_cells.sum(), my_cells.sum(), 14
+        yield assert_rel_equal, my_ray['Density'][ray_cells].sum(), \
+                                my_all['Density'][my_cells].sum(), 14
+        yield assert_rel_equal, my_ray['dts'].sum(), 1.0, 14

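test_ray builds the classic slab test, vectorized over all cells: per axis, tin and tout are the parameters at which the ray p(t) = p1 + t*(p2 - p1) enters and leaves the cell's slab, and the segment crosses the cell iff max(tin) < min(tout) with 0 < t < 1.  The same test for a single box (a sketch; it assumes the ray is not parallel to any axis, as is almost surely true for the random endpoints above):

    import numpy as np

    def segment_hits_box(p1, p2, lo, hi):
        d = p2 - p1                     # assumed nonzero in every axis
        t0 = (lo - p1) / d
        t1 = (hi - p1) / d
        tin = np.minimum(t0, t1).max()  # last slab entry
        tout = np.maximum(t0, t1).min() # first slab exit
        return (tin < tout) and (tin < 1) and (tout > 0)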

diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/data_objects/tests/test_slice.py
--- /dev/null
+++ b/yt/data_objects/tests/test_slice.py
@@ -0,0 +1,55 @@
+from yt.testing import *
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_slice():
+    for nprocs in [8, 1]:
+        # We want to test both 1 proc and 8 procs, to make sure that
+        # parallelism isn't broken
+        pf = fake_random_pf(64, nprocs = nprocs)
+        dims = pf.domain_dimensions
+        xn, yn, zn = pf.domain_dimensions
+        xi, yi, zi = pf.domain_left_edge + 1.0/(pf.domain_dimensions * 2)
+        xf, yf, zf = pf.domain_right_edge - 1.0/(pf.domain_dimensions * 2)
+        coords = np.mgrid[xi:xf:xn*1j, yi:yf:yn*1j, zi:zf:zn*1j]
+        uc = [np.unique(c) for c in coords]
+        slc_pos = 0.5
+        # Some simple slice tests with single grids
+        for ax, an in enumerate("xyz"):
+            xax = x_dict[ax]
+            yax = y_dict[ax]
+            for wf in ["Density", None]:
+                slc = pf.h.slice(ax, slc_pos, ["Ones", "Density"])
+                yield assert_equal, slc["Ones"].sum(), slc["Ones"].size
+                yield assert_equal, slc["Ones"].min(), 1.0
+                yield assert_equal, slc["Ones"].max(), 1.0
+                yield assert_equal, np.unique(slc["px"]), uc[xax]
+                yield assert_equal, np.unique(slc["py"]), uc[yax]
+                yield assert_equal, np.unique(slc["pdx"]), 1.0/(dims[xax]*2.0)
+                yield assert_equal, np.unique(slc["pdy"]), 1.0/(dims[yax]*2.0)
+                frb = slc.to_frb((1.0,'unitary'), 64)
+                for slc_field in ['Ones', 'Density']:
+                    yield assert_equal, frb[slc_field].info['data_source'], \
+                            slc.__str__()
+                    yield assert_equal, frb[slc_field].info['axis'], \
+                            ax
+                    yield assert_equal, frb[slc_field].info['field'], \
+                            slc_field
+                    yield assert_equal, frb[slc_field].info['units'], \
+                            pf.field_info[slc_field].get_units()
+                    yield assert_equal, frb[slc_field].info['xlim'], \
+                            frb.bounds[:2]
+                    yield assert_equal, frb[slc_field].info['ylim'], \
+                            frb.bounds[2:]
+                    yield assert_equal, frb[slc_field].info['length_to_cm'], \
+                            pf['cm']
+                    yield assert_equal, frb[slc_field].info['center'], \
+                            slc.center
+                    yield assert_equal, frb[slc_field].info['coord'], \
+                            slc_pos
+            # wf == None
+            yield assert_equal, wf, None
+
+


diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/data_objects/tests/test_streamlines.py
--- /dev/null
+++ b/yt/data_objects/tests/test_streamlines.py
@@ -0,0 +1,22 @@
+from yt.testing import *
+from yt.visualization.api import Streamlines
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+_fields = ("Density", "x-velocity", "y-velocity", "z-velocity")
+
+def test_covering_grid():
+    # We decompose in different ways
+    cs = np.mgrid[0.47:0.53:2j,0.47:0.53:2j,0.47:0.53:2j]
+    cs = np.array([a.ravel() for a in cs]).T
+    length = (1.0/128) * 16 # 16 half-widths of a cell
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs = nprocs, fields = _fields)
+        streams = Streamlines(pf, cs, length=length)
+        streams.integrate_through_volume()
+        for path in (streams.path(i) for i in range(8)):
+            yield assert_rel_equal, path['dts'].sum(), 1.0, 14
+            yield assert_equal, np.all(path['t'] <= (1.0 + 1e-10)), True
+            path["Density"]


diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -258,8 +258,11 @@
 
         """
         if isinstance(filenames, types.StringTypes):
+            pattern = filenames
             filenames = glob.glob(filenames)
             filenames.sort()
+            if len(filenames) == 0:
+                raise YTNoFilenamesMatchPattern(pattern)
         obj = cls(filenames[:], parallel = parallel)
         return obj
 

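The new guard in from_filenames turns a glob pattern that matches nothing into an immediate error rather than a silently empty time series.  The pattern in isolation (a sketch; RuntimeError stands in for yt's YTNoFilenamesMatchPattern):

    import glob

    def expand_pattern(pattern):
        filenames = glob.glob(pattern)
        filenames.sort()
        if len(filenames) == 0:
            raise RuntimeError("No filenames match the pattern: %r" % pattern)
        return filenames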

diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -32,7 +32,7 @@
 
 from yt.funcs import *
 
-from yt.utilities.lib import CICDeposit_3, obtain_rvec
+from yt.utilities.lib import CICDeposit_3, obtain_rvec, obtain_rv_vec
 from yt.utilities.cosmology import Cosmology
 from field_info_container import \
     add_field, \
@@ -54,7 +54,19 @@
      kboltz, \
      G, \
      rho_crit_now, \
-     speed_of_light_cgs
+     speed_of_light_cgs, \
+     km_per_cm
+
+from yt.utilities.math_utils import \
+    get_sph_r_component, \
+    get_sph_theta_component, \
+    get_sph_phi_component, \
+    get_cyl_r_component, \
+    get_cyl_z_component, \
+    get_cyl_theta_component, \
+    get_cyl_r, get_cyl_theta, \
+    get_cyl_z, get_sph_r, \
+    get_sph_theta, get_sph_phi
      
 # Note that, despite my newfound efforts to comply with PEP-8,
 # I violate it here in order to keep the name/func_name relationship
@@ -179,12 +191,8 @@
 
 def _VelocityMagnitude(field, data):
     """M{|v|}"""
-    bulk_velocity = data.get_field_parameter("bulk_velocity")
-    if bulk_velocity == None:
-        bulk_velocity = np.zeros(3)
-    return ( (data["x-velocity"]-bulk_velocity[0])**2.0 + \
-             (data["y-velocity"]-bulk_velocity[1])**2.0 + \
-             (data["z-velocity"]-bulk_velocity[2])**2.0 )**(1.0/2.0)
+    velocities = obtain_rv_vec(data)
+    return np.sqrt(np.sum(velocities**2,axis=0))
 add_field("VelocityMagnitude", function=_VelocityMagnitude,
           take_log=False, units=r"\rm{cm}/\rm{s}")
 
@@ -194,13 +202,6 @@
           function=_TangentialOverVelocityMagnitude,
           take_log=False)
 
-def _TangentialVelocity(field, data):
-    return np.sqrt(data["VelocityMagnitude"]**2.0
-                 - data["RadialVelocity"]**2.0)
-add_field("TangentialVelocity", 
-          function=_TangentialVelocity,
-          take_log=False, units=r"\rm{cm}/\rm{s}")
-
 def _Pressure(field, data):
     """M{(Gamma-1.0)*rho*E}"""
     return (data.pf["Gamma"] - 1.0) * \
@@ -223,14 +224,9 @@
 def _sph_r(field, data):
     center = data.get_field_parameter("center")
       
-    coords = np.array([data['x'] - center[0],
-                       data['y'] - center[1],
-                       data['z'] - center[2]]).transpose()
+    coords = obtain_rvec(data)
 
-    ## The spherical coordinates radius is simply the magnitude of the
-    ## coords vector.
-
-    return np.sqrt(np.sum(coords**2,axis=-1))
+    return get_sph_r(coords)
 
 def _Convert_sph_r_CGS(data):
    return data.convert("cm")
@@ -245,20 +241,9 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
     
-    coords = np.array([data['x'] - center[0],
-                       data['y'] - center[1],
-                       data['z'] - center[2]]).transpose()
+    coords = obtain_rvec(data)
 
-    ## The angle (theta) with respect to the normal (J), is the arccos
-    ## of the dot product of the normal with the normalized coords
-    ## vector.
-    
-    tile_shape = list(coords.shape)[:-1] + [1]
-    J = np.tile(normal,tile_shape)
-
-    JdotCoords = np.sum(J*coords,axis=-1)
-    
-    return np.arccos( JdotCoords / np.sqrt(np.sum(coords**2,axis=-1)) )
+    return get_sph_theta(coords, normal)
 
 add_field("sph_theta", function=_sph_theta,
          validators=[ValidateParameter("center"),ValidateParameter("normal")])
@@ -269,54 +254,21 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
     
-    coords = np.array([data['x'] - center[0],
-                       data['y'] - center[1],
-                       data['z'] - center[2]]).transpose()
-    
-    ## We have freedom with respect to what axis (xprime) to define
-    ## the disk angle. Here I've chosen to use the axis that is
-    ## perpendicular to the normal and the y-axis. When normal ==
-    ## y-hat, then set xprime = z-hat. With this definition, when
-    ## normal == z-hat (as is typical), then xprime == x-hat.
-    ##
-    ## The angle is then given by the arctan of the ratio of the
-    ## yprime-component and the xprime-component of the coords vector.
+    coords = obtain_rvec(data)
 
-    xprime = np.cross([0.0,1.0,0.0],normal)
-    if np.sum(xprime) == 0: xprime = np.array([0.0, 0.0, 1.0])
-    yprime = np.cross(normal,xprime)
-    
-    tile_shape = list(coords.shape)[:-1] + [1]
-    Jx = np.tile(xprime,tile_shape)
-    Jy = np.tile(yprime,tile_shape)
-    
-    Px = np.sum(Jx*coords,axis=-1)
-    Py = np.sum(Jy*coords,axis=-1)
-    
-    return np.arctan2(Py,Px)
+    return get_sph_phi(coords, normal)
 
 add_field("sph_phi", function=_sph_phi,
          validators=[ValidateParameter("center"),ValidateParameter("normal")])
 
-
-
 ### cylindrical coordinates: R (radius in the cylinder's plane)
 def _cyl_R(field, data):
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
       
-    coords = np.array([data['x'] - center[0],
-                       data['y'] - center[1],
-                       data['z'] - center[2]]).transpose()
+    coords = obtain_rvec(data)
 
-    ## The cross product of the normal (J) with the coords vector
-    ## gives a vector of magnitude equal to the cylindrical radius.
-    
-    tile_shape = list(coords.shape)[:-1] + [1]
-    J = np.tile(normal,tile_shape)
-
-    JcrossCoords = np.cross(J,coords)
-    return np.sqrt(np.sum(JcrossCoords**2,axis=-1))
+    return get_cyl_r(coords, normal)
 
 def _Convert_cyl_R_CGS(data):
    return data.convert("cm")
@@ -324,6 +276,9 @@
 add_field("cyl_R", function=_cyl_R,
          validators=[ValidateParameter("center"),ValidateParameter("normal")],
          convert_function = _Convert_cyl_R_CGS, units=r"\rm{cm}")
+add_field("cyl_RCode", function=_cyl_R,
+          validators=[ValidateParameter("center"),ValidateParameter("normal")],
+          units=r"Radius (code)")
 
 
 ### cylindrical coordinates: z (height above the cylinder's plane)
@@ -331,17 +286,9 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
     
-    coords = np.array([data['x'] - center[0],
-                       data['y'] - center[1],
-                       data['z'] - center[2]]).transpose()
+    coords = obtain_rvec(data)
 
-    ## The dot product of the normal (J) with the coords vector gives
-    ## the cylindrical height.
-    
-    tile_shape = list(coords.shape)[:-1] + [1]
-    J = np.tile(normal,tile_shape)
-
-    return np.sum(J*coords,axis=-1)  
+    return get_cyl_z(coords, normal)
 
 def _Convert_cyl_z_CGS(data):
    return data.convert("cm")
@@ -352,14 +299,17 @@
 
 
 ### cylindrical coordinates: theta (angle in the cylinder's plane)
-### [This is identical to the spherical coordinate's 'phi' angle.]
 def _cyl_theta(field, data):
-    return data['sph_phi']
+    center = data.get_field_parameter("center")
+    normal = data.get_field_parameter("normal")
+    
+    coords = obtain_rvec(data)
+
+    return get_cyl_theta(coords, normal)
 
 add_field("cyl_theta", function=_cyl_theta,
          validators=[ValidateParameter("center"),ValidateParameter("normal")])
 
-
 ### The old field DiskAngle is the same as the spherical coordinates'
 ### 'theta' angle. I'm keeping DiskAngle for backwards compatibility.
 def _DiskAngle(field, data):
@@ -392,6 +342,54 @@
                       ValidateParameter("normal")],
           units=r"AU", display_field=False)
 
+def _cyl_RadialVelocity(field, data):
+    normal = data.get_field_parameter("normal")
+    velocities = obtain_rv_vec(data)
+
+    theta = data['cyl_theta']
+
+    return get_cyl_r_component(velocities, theta, normal)
+
+def _cyl_RadialVelocityABS(field, data):
+    return np.abs(_cyl_RadialVelocity(field, data))
+def _Convert_cyl_RadialVelocityKMS(data):
+    return km_per_cm
+add_field("cyl_RadialVelocity", function=_cyl_RadialVelocity,
+          units=r"\rm{cm}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_RadialVelocityABS", function=_cyl_RadialVelocityABS,
+          units=r"\rm{cm}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_RadialVelocityKMS", function=_cyl_RadialVelocity,
+          convert_function=_Convert_cyl_RadialVelocityKMS, units=r"\rm{km}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_RadialVelocityKMSABS", function=_cyl_RadialVelocityABS,
+          convert_function=_Convert_cyl_RadialVelocityKMS, units=r"\rm{km}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+
+def _cyl_TangentialVelocity(field, data):
+    normal = data.get_field_parameter("normal")
+    velocities = obtain_rv_vec(data)
+    theta = data['cyl_theta']
+
+    return get_cyl_theta_component(velocities, theta, normal)
+
+def _cyl_TangentialVelocityABS(field, data):
+    return np.abs(_cyl_TangentialVelocity(field, data))
+def _Convert_cyl_TangentialVelocityKMS(data):
+    return km_per_cm
+add_field("cyl_TangentialVelocity", function=_cyl_TangentialVelocity,
+          units=r"\rm{cm}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_TangentialVelocityABS", function=_cyl_TangentialVelocityABS,
+          units=r"\rm{cm}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_TangentialVelocityKMS", function=_cyl_TangentialVelocity,
+          convert_function=_Convert_cyl_TangentialVelocityKMS, units=r"\rm{km}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_TangentialVelocityKMSABS", function=_cyl_TangentialVelocityABS,
+          convert_function=_Convert_cyl_TangentialVelocityKMS, units=r"\rm{km}/\rm{s}",
+          validators=[ValidateParameter("normal")])
 
 def _DynamicalTime(field, data):
     """
@@ -450,7 +448,7 @@
 
 # This is rho_total / rho_cr(z).
 def _Convert_Overdensity(data):
-    return 1 / (rho_crit_now * data.pf.hubble_constant**2 * 
+    return 1.0 / (rho_crit_now * data.pf.hubble_constant**2 * 
                 (1+data.pf.current_redshift)**3)
 add_field("Overdensity",function=_Matter_Density,
           convert_function=_Convert_Overdensity, units=r"")
@@ -470,8 +468,8 @@
     else:
         omega_baryon_now = 0.0441
     return data['Density'] / (omega_baryon_now * rho_crit_now * 
-                              (data.pf['CosmologyHubbleConstantNow']**2) * 
-                              ((1+data.pf['CosmologyCurrentRedshift'])**3))
+                              (data.pf.hubble_constant**2) * 
+                              ((1+data.pf.current_redshift)**3))
 add_field("Baryon_Overdensity", function=_Baryon_Overdensity, 
           units=r"")
 
@@ -640,13 +638,7 @@
           take_log=False, display_field=False)
 
 def obtain_velocities(data):
-    if data.has_field_parameter("bulk_velocity"):
-        bv = data.get_field_parameter("bulk_velocity")
-    else: bv = np.zeros(3, dtype='float64')
-    xv = data["x-velocity"] - bv[0]
-    yv = data["y-velocity"] - bv[1]
-    zv = data["z-velocity"] - bv[2]
-    return xv, yv, zv
+    return obtain_rv_vec(data)
 
 def _convertSpecificAngularMomentum(data):
     return data.convert("cm")
@@ -711,7 +703,7 @@
 #          convert_function=_convertSpecificAngularMomentum, vector_field=True,
 #          units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter('center')])
 def _convertSpecificAngularMomentumKMSMPC(data):
-    return data.convert("mpc")/1e5
+    return km_per_cm*data.convert("mpc")
 #add_field("ParticleSpecificAngularMomentumKMSMPC",
 #          function=_ParticleSpecificAngularMomentum, particle_type=True,
 #          convert_function=_convertSpecificAngularMomentumKMSMPC, vector_field=True,
@@ -883,33 +875,32 @@
           display_name = "Radius (code)")
 
 def _RadialVelocity(field, data):
-    center = data.get_field_parameter("center")
-    bulk_velocity = data.get_field_parameter("bulk_velocity")
-    if bulk_velocity == None:
-        bulk_velocity = np.zeros(3)
-    new_field = ( (data['x']-center[0])*(data["x-velocity"]-bulk_velocity[0])
-                + (data['y']-center[1])*(data["y-velocity"]-bulk_velocity[1])
-                + (data['z']-center[2])*(data["z-velocity"]-bulk_velocity[2])
-                )/data["RadiusCode"]
-    if np.any(np.isnan(new_field)): # to fix center = point
-        new_field[np.isnan(new_field)] = 0.0
-    return new_field
+    normal = data.get_field_parameter("normal")
+    velocities = obtain_rv_vec(data)    
+    theta = data['sph_theta']
+    phi   = data['sph_phi']
+
+    return get_sph_r_component(velocities, theta, phi, normal)
+
 def _RadialVelocityABS(field, data):
     return np.abs(_RadialVelocity(field, data))
 def _ConvertRadialVelocityKMS(data):
-    return 1e-5
+    return km_per_cm
 add_field("RadialVelocity", function=_RadialVelocity,
-          units=r"\rm{cm}/\rm{s}",
-          validators=[ValidateParameter("center")])
+          units=r"\rm{cm}/\rm{s}")
 add_field("RadialVelocityABS", function=_RadialVelocityABS,
-          units=r"\rm{cm}/\rm{s}",
-          validators=[ValidateParameter("center")])
+          units=r"\rm{cm}/\rm{s}")
 add_field("RadialVelocityKMS", function=_RadialVelocity,
-          convert_function=_ConvertRadialVelocityKMS, units=r"\rm{km}/\rm{s}",
-          validators=[ValidateParameter("center")])
+          convert_function=_ConvertRadialVelocityKMS, units=r"\rm{km}/\rm{s}")
 add_field("RadialVelocityKMSABS", function=_RadialVelocityABS,
-          convert_function=_ConvertRadialVelocityKMS, units=r"\rm{km}/\rm{s}",
-          validators=[ValidateParameter("center")])
+          convert_function=_ConvertRadialVelocityKMS, units=r"\rm{km}/\rm{s}")
+
+def _TangentialVelocity(field, data):
+    return np.sqrt(data["VelocityMagnitude"]**2.0
+                 - data["RadialVelocity"]**2.0)
+add_field("TangentialVelocity", 
+          function=_TangentialVelocity,
+          take_log=False, units=r"\rm{cm}/\rm{s}")
 
 def _CuttingPlaneVelocityX(field, data):
     x_vec, y_vec, z_vec = [data.get_field_parameter("cp_%s_vec" % (ax))
@@ -1026,6 +1017,47 @@
           display_name=r"\rm{Magnetic}\/\rm{Energy}",
           units="\rm{ergs}\/\rm{cm}^{-3}")
 
+def _BPoloidal(field,data):
+    normal = data.get_field_parameter("normal")
+
+    Bfields = np.array([data['Bx'], data['By'], data['Bz']])
+
+    theta = data['sph_theta']
+    phi   = data['sph_phi']
+
+    return get_sph_theta_component(Bfields, theta, phi, normal)
+
+add_field("BPoloidal", function=_BPoloidal,
+          units=r"\rm{Gauss}",
+          validators=[ValidateParameter("normal")])
+
+def _BToroidal(field,data):
+    normal = data.get_field_parameter("normal")
+
+    Bfields = np.array([data['Bx'], data['By'], data['Bz']])
+
+    phi   = data['sph_phi']
+
+    return get_sph_phi_component(Bfields, phi, normal)
+
+add_field("BToroidal", function=_BToroidal,
+          units=r"\rm{Gauss}",
+          validators=[ValidateParameter("normal")])
+
+def _BRadial(field,data):
+    normal = data.get_field_parameter("normal")
+
+    Bfields = np.array([data['Bx'], data['By'], data['Bz']])
+
+    theta = data['sph_theta']
+    phi   = data['sph_phi']
+
+    return get_sph_r_component(Bfields, theta, phi, normal)
+
+add_field("BRadial", function=_BPoloidal,
+          units=r"\rm{Gauss}",
+          validators=[ValidateParameter("normal")])
+
 def _VorticitySquared(field, data):
     mylog.debug("Generating vorticity on %s", data)
     # We need to set up stencils


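The cylindrical and spherical velocity fields introduced above all project the bulk-corrected velocity vector from obtain_rv_vec onto unit vectors built from the "normal" field parameter. A minimal sketch of using them, assuming the moving7 sample output and a z-axis normal:

    from yt.mods import *
    import numpy as np

    pf = load("DD0010/moving7_0010")
    sp = pf.h.sphere("max", (0.1, 'unitary'))
    # the cyl_*/sph_* fields read their frame from these field
    # parameters; the sphere already supplies "center"
    sp.set_field_parameter("normal", np.array([0.0, 0.0, 1.0]))
    print sp["cyl_RadialVelocity"].mean(), sp["cyl_TangentialVelocity"].mean()
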
diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/frontends/enzo/tests/test_outputs.py
--- /dev/null
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -0,0 +1,51 @@
+"""
+Enzo frontend tests using moving7
+
+Author: Matthew Turk <matthewturk@gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.testing import *
+from yt.utilities.answer_testing.framework import \
+    requires_pf, \
+    small_patch_amr, \
+    big_patch_amr, \
+    data_dir_load
+from yt.frontends.enzo.api import EnzoStaticOutput
+
+_fields = ("Temperature", "Density", "VelocityMagnitude", "DivV",
+           "particle_density")
+
+m7 = "DD0010/moving7_0010"
+@requires_pf(m7)
+def test_moving7():
+    pf = data_dir_load(m7)
+    yield assert_equal, str(pf), "moving7_0010"
+    for test in small_patch_amr(m7, _fields):
+        yield test
+
+g30 = "IsolatedGalaxy/galaxy0030/galaxy0030"
+@requires_pf(g30, big_data=True)
+def test_galaxy0030():
+    pf = data_dir_load(g30)
+    yield assert_equal, str(pf), "galaxy0030"
+    for test in big_patch_amr(g30, _fields):
+        yield test


diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -98,7 +98,10 @@
     if fn1.endswith("_Fraction"):
         add_field(fn1.split("_")[0] + "_Density",
                   function=_get_density(fn1), take_log=True,
-                  display_name="%s\/Density" % fn1.split("_")[0])
+                  display_name="%s\/Density" % fn1.split("_")[0],
+                  units = r"\rm{g}/\rm{cm}^{3}",
+                  projected_units = r"\rm{g}/\rm{cm}^{2}",
+                  )
 
 def _get_convert(fname):
     def _conv(data):
@@ -107,7 +110,8 @@
 
 add_flash_field("dens", function=NullFunc, take_log=True,
                 convert_function=_get_convert("dens"),
-                units=r"\rm{g}/\rm{cm}^3")
+                units=r"\rm{g}/\rm{cm}^{3}",
+                projected_units = r"\rm{g}/\rm{cm}^{2}"),
 add_flash_field("velx", function=NullFunc, take_log=False,
                 convert_function=_get_convert("velx"),
                 units=r"\rm{cm}/\rm{s}")
@@ -155,10 +159,10 @@
                 units = r"\rm{K}")
 add_flash_field("pres", function=NullFunc, take_log=True,
                 convert_function=_get_convert("pres"),
-                units=r"\rm{erg}\//\/\rm{cm}^{3}")
+                units=r"\rm{erg}/\rm{cm}^{3}")
 add_flash_field("pden", function=NullFunc, take_log=True,
                 convert_function=_get_convert("pden"),
-                units=r"\rm{g}/\rm{cm}^3")
+                units=r"\rm{g}/\rm{cm}^{3}")
 add_flash_field("magx", function=NullFunc, take_log=False,
                 convert_function=_get_convert("magx"),
                 units = r"\mathrm{Gau\ss}")
@@ -170,7 +174,7 @@
                 units = r"\mathrm{Gau\ss}")
 add_flash_field("magp", function=NullFunc, take_log=True,
                 convert_function=_get_convert("magp"),
-                units = r"\rm{erg}\//\/\rm{cm}^{3}")
+                units = r"\rm{erg}/\rm{cm}^{3}")
 add_flash_field("divb", function=NullFunc, take_log=False,
                 convert_function=_get_convert("divb"),
                 units = r"\mathrm{Gau\ss}\/\rm{cm}")
@@ -182,10 +186,10 @@
                 units=r"\rm{ratio\/of\/specific\/heats}")
 add_flash_field("gpot", function=NullFunc, take_log=False,
                 convert_function=_get_convert("gpot"),
-                units=r"\rm{ergs\//\/g}")
+                units=r"\rm{ergs}/\rm{g}")
 add_flash_field("gpol", function=NullFunc, take_log=False,
                 convert_function=_get_convert("gpol"),
-                units = r"\rm{ergs\//\/g}")
+                units = r"\rm{ergs}/\rm{g}")
 add_flash_field("flam", function=NullFunc, take_log=False,
                 convert_function=_get_convert("flam"))
 
@@ -204,6 +208,7 @@
     add_field(f, TranslationFunc(v),
               take_log=KnownFLASHFields[v].take_log,
               units = ff._units, display_name=dname,
+              projected_units = ff._projected_units,
               particle_type = pfield)
 
 def _convertParticleMassMsun(data):


diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/frontends/stream/api.py
--- a/yt/frontends/stream/api.py
+++ b/yt/frontends/stream/api.py
@@ -29,7 +29,8 @@
       StreamHierarchy, \
       StreamStaticOutput, \
       StreamHandler, \
-      load_uniform_grid
+      load_uniform_grid, \
+      load_amr_grids
 
 from .fields import \
       KnownStreamFields, \


diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -353,7 +353,8 @@
             psize = get_psize(np.array(data[key].shape), nprocs)
             grid_left_edges, grid_right_edges, temp[key] = \
                 decompose_array(data[key], psize, bbox)
-            grid_dimensions = np.array([grid.shape for grid in temp[key]])
+            grid_dimensions = np.array([grid.shape for grid in temp[key]],
+                                       dtype="int32")
         for gid in range(nprocs):
             new_data[gid] = {}
             for key in temp.keys():
@@ -364,7 +365,7 @@
         sfh.update({0:data})
         grid_left_edges = domain_left_edge
         grid_right_edges = domain_right_edge
-        grid_dimensions = domain_dimensions.reshape(nprocs,3)
+        grid_dimensions = domain_dimensions.reshape(nprocs,3).astype("int32")
 
     handler = StreamHandler(
         grid_left_edges,
@@ -394,3 +395,103 @@
     for unit in mpc_conversion.keys():
         spf.units[unit] = mpc_conversion[unit] * box_in_mpc
     return spf
+
+def load_amr_grids(grid_data, domain_dimensions, sim_unit_to_cm, bbox=None,
+                   sim_time=0.0, number_of_particles=0):
+    r"""Load a set of grids of data into yt as a
+    :class:`~yt.frontends.stream.data_structures.StreamHandler`.
+
+    This should allow a sequence of grids of varying resolution to be
+    loaded directly into yt and analyzed like any other dataset.  This
+    comes with several caveats:
+        * Units will be incorrect unless the data has already been converted to
+          cgs.
+        * Some functions may behave oddly, and parallelism will be
+          disappointing or non-existent in most cases.
+        * Particles may be difficult to integrate.
+        * No consistency checks are performed on the hierarchy.
+
+    Parameters
+    ----------
+    grid_data : list of dicts
+        This is a list of dicts.  Each dict must have entries "left_edge",
+        "right_edge", "dimensions", "level", and then any remaining entries are
+        assumed to be fields.  This will be modified in place and can't be
+        assumed to be static..
+    domain_dimensions : array_like
+        The dimensions of the domain on the coarsest level.
+    sim_unit_to_cm : float
+        Conversion factor from simulation units to centimeters
+    bbox : array_like (xdim:zdim, LE:RE), optional
+        Size of the computational domain in units of sim_unit_to_cm.
+    sim_time : float, optional
+        The simulation time in seconds
+    number_of_particles : int, optional
+        If particle fields are included, set this to the number of particles
+
+    Examples
+    --------
+
+    >>> grid_data = [
+    ...     dict(left_edge = [0.0, 0.0, 0.0],
+    ...          right_edge = [1.0, 1.0, 1.],
+    ...          level = 0,
+    ...          dimensions = [32, 32, 32]),
+    ...     dict(left_edge = [0.25, 0.25, 0.25],
+    ...          right_edge = [0.75, 0.75, 0.75],
+    ...          level = 1,
+    ...          dimensions = [32, 32, 32])
+    ... ]
+    >>> for g in grid_data:
+    ...     g["Density"] = np.random.random(g["dimensions"]) * 2**g["level"]
+    ...
+    >>> pf = load_amr_grids(grid_data, [32, 32, 32], 1.0)
+    """
+
+    domain_dimensions = np.array(domain_dimensions)
+    ngrids = len(grid_data)
+    if bbox is None:
+        bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], 'float64')
+    domain_left_edge = np.array(bbox[:, 0], 'float64')
+    domain_right_edge = np.array(bbox[:, 1], 'float64')
+    grid_levels = np.zeros((ngrids, 1), dtype='int32')
+    grid_left_edges = np.zeros((ngrids, 3), dtype="float32")
+    grid_right_edges = np.zeros((ngrids, 3), dtype="float32")
+    grid_dimensions = np.zeros((ngrids, 3), dtype="int32")
+    sfh = StreamDictFieldHandler()
+    for i, g in enumerate(grid_data):
+        grid_left_edges[i,:] = g.pop("left_edge")
+        grid_right_edges[i,:] = g.pop("right_edge")
+        grid_dimensions[i,:] = g.pop("dimensions")
+        grid_levels[i,:] = g.pop("level")
+        sfh[i] = g
+
+    handler = StreamHandler(
+        grid_left_edges,
+        grid_right_edges,
+        grid_dimensions,
+        grid_levels,
+        None, # parent_ids is none
+        number_of_particles*np.ones(ngrids, dtype='int64').reshape(ngrids,1),
+        np.zeros(ngrids).reshape((ngrids,1)),
+        sfh,
+    )
+
+    handler.name = "AMRGridData"
+    handler.domain_left_edge = domain_left_edge
+    handler.domain_right_edge = domain_right_edge
+    handler.refine_by = 2
+    handler.dimensionality = 3
+    handler.domain_dimensions = domain_dimensions
+    handler.simulation_time = sim_time
+    handler.cosmology_simulation = 0
+
+    spf = StreamStaticOutput(handler)
+    spf.units["cm"] = sim_unit_to_cm
+    spf.units['1'] = 1.0
+    spf.units["unitary"] = 1.0
+    box_in_mpc = sim_unit_to_cm / mpc_conversion['cm']
+    for unit in mpc_conversion.keys():
+        spf.units[unit] = mpc_conversion[unit] * box_in_mpc
+    return spf


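Note that load_amr_grids pop()s the geometry keys off each input dict, so after the call the dicts hold only field arrays; this is what the docstring means by "modified in place". A small sketch:

    import numpy as np
    from yt.frontends.stream.api import load_amr_grids

    grid = dict(left_edge=[0.0, 0.0, 0.0], right_edge=[1.0, 1.0, 1.0],
                level=0, dimensions=[32, 32, 32])
    grid["Density"] = np.random.random((32, 32, 32))
    pf = load_amr_grids([grid], [32, 32, 32], 1.0)
    # only the field entries survive
    assert sorted(grid.keys()) == ["Density"]
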
diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -310,7 +310,8 @@
     maxval = max(maxval, 1)
     from yt.config import ytcfg
     if ytcfg.getboolean("yt", "suppressStreamLogging") or \
-       ytcfg.getboolean("yt", "ipython_notebook"):
+       ytcfg.getboolean("yt", "ipython_notebook") or \
+       ytcfg.getboolean("yt", "__withintesting"):
         return DummyProgressBar()
     elif ytcfg.getboolean("yt", "__withinreason"):
         from yt.gui.reason.extdirect_repl import ExtProgressBar


diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -62,7 +62,7 @@
     ValidateParameter, ValidateDataField, ValidateProperty, \
     ValidateSpatial, ValidateGridType, \
     TimeSeriesData, AnalysisTask, analysis_task, \
-    ParticleTrajectoryCollection
+    ParticleTrajectoryCollection, ImageArray
 
 from yt.data_objects.derived_quantities import \
     add_quantity, quantity_info


diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -24,7 +24,21 @@
 
 import numpy as np
 from yt.funcs import *
-from numpy.testing import assert_array_equal
+from numpy.testing import assert_array_equal, assert_almost_equal, \
+    assert_approx_equal, assert_array_almost_equal, assert_equal, \
+    assert_array_less, assert_string_equal, assert_array_almost_equal_nulp
+
+def assert_rel_equal(a1, a2, decimals):
+    # We have nan checks in here because occasionally we have fields that get
+    # weighted without non-zero weights.  I'm looking at you, particle fields!
+    if isinstance(a1, np.ndarray):
+        assert(a1.size == a2.size)
+        # Mask out NaNs (note: this modifies the input arrays in place)
+        a1[np.isnan(a1)] = 1.0
+        a2[np.isnan(a2)] = 1.0
+    elif np.isnan(a1) and np.isnan(a2):
+        return True
+    return assert_almost_equal(a1/a2, 1.0, decimals)
 
 def amrspace(extent, levels=7, cells=8):
     """Creates two numpy arrays representing the left and right bounds of 
@@ -127,17 +141,23 @@
 
     return left, right, level
 
-def fake_random_pf(ndims, peak_value = 1.0, fields = ("Density",), negative = False):
+def fake_random_pf(ndims, peak_value = 1.0, fields = ("Density",),
+                   negative = False, nprocs = 1):
     from yt.frontends.stream.api import load_uniform_grid
     if not iterable(ndims):
         ndims = [ndims, ndims, ndims]
     else:
         assert(len(ndims) == 3)
-    if negative:
-        offset = 0.5
-    else:
-        offset = 0.0
+    if not iterable(negative):
+        negative = [negative for f in fields]
+    assert(len(fields) == len(negative))
+    offsets = []
+    for n in negative:
+        if n:
+            offsets.append(0.5)
+        else:
+            offsets.append(0.0)
     data = dict((field, (np.random.random(ndims) - offset) * peak_value)
-                 for field in fields)
-    ug = load_uniform_grid(data, ndims, 1.0)
+                 for field,offset in zip(fields,offsets))
+    ug = load_uniform_grid(data, ndims, 1.0, nprocs = nprocs)
     return ug


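fake_random_pf now accepts a per-field "negative" list and an nprocs argument, so a single test can cover sign handling and grid decomposition at once. A short sketch:

    from yt.testing import fake_random_pf

    # Density stays non-negative, x-velocity may go negative;
    # nprocs=8 decomposes the same data into eight grids
    pf = fake_random_pf(32, nprocs=8,
                        fields=("Density", "x-velocity"),
                        negative=[False, True])
    dd = pf.h.all_data()
    assert dd["Density"].min() >= 0.0
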
diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/utilities/answer_testing/__init__.py
--- a/yt/utilities/answer_testing/__init__.py
+++ b/yt/utilities/answer_testing/__init__.py
@@ -22,10 +22,3 @@
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
-
-import runner
-import output_tests
-from runner import RegressionTestRunner
-
-from output_tests import RegressionTest, SingleOutputTest, \
-    MultipleOutputTest, YTStaticOutputTest, create_test




diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/utilities/answer_testing/default_tests.py
--- a/yt/utilities/answer_testing/default_tests.py
+++ b/yt/utilities/answer_testing/default_tests.py
@@ -67,3 +67,4 @@
         for field in sorted(self.result):
             for p1, p2 in zip(self.result[field], old_result[field]):
                 self.compare_data_arrays(p1, p2, self.tolerance)
+


diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/utilities/answer_testing/framework.py
--- /dev/null
+++ b/yt/utilities/answer_testing/framework.py
@@ -0,0 +1,396 @@
+"""
+Answer Testing using Nose as a starting point
+
+Author: Matthew Turk <matthewturk@gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import logging
+import os
+import hashlib
+import contextlib
+import urllib2
+import cPickle
+
+from nose.plugins import Plugin
+from yt.testing import *
+from yt.config import ytcfg
+from yt.mods import *
+import time
+
+from yt.utilities.logger import disable_stream_logging
+from yt.utilities.command_line import get_yt_version
+
+mylog = logging.getLogger('nose.plugins.answer-testing')
+run_big_data = False
+
+_latest = "gold001"
+_url_path = "http://yt-answer-tests.s3-website-us-east-1.amazonaws.com/%s_%s"
+
+class AnswerTesting(Plugin):
+    name = "answer-testing"
+
+    def options(self, parser, env=os.environ):
+        super(AnswerTesting, self).options(parser, env=env)
+        parser.add_option("--answer-compare", dest="compare_name",
+            default=_latest, help="The name against which we will compare")
+        parser.add_option("--answer-big-data", dest="big_data",
+            default=False, help="Should we run against big data, too?",
+            action="store_true")
+        parser.add_option("--answer-name", dest="this_name",
+            default=None,
+            help="The name we'll call this set of tests")
+        parser.add_option("--answer-store", dest="store_results",
+            default=False, action="store_true")
+
+    def configure(self, options, conf):
+        super(AnswerTesting, self).configure(options, conf)
+        if not self.enabled:
+            return
+        disable_stream_logging()
+        try:
+            my_hash = get_yt_version()
+        except:
+            my_hash = "UNKNOWN%s" % (time.time())
+        if options.this_name is None: options.this_name = my_hash
+        from yt.config import ytcfg
+        ytcfg["yt","__withintesting"] = "True"
+        AnswerTestingTest.result_storage = \
+            self.result_storage = defaultdict(dict)
+        if options.compare_name is not None:
+            # Now we grab from our S3 store
+            if options.compare_name == "latest":
+                options.compare_name = _latest
+            AnswerTestingTest.reference_storage = \
+                AnswerTestOpener(options.compare_name)
+        self.answer_name = options.this_name
+        self.store_results = options.store_results
+        global run_big_data
+        run_big_data = options.big_data
+
+    def finalize(self, result):
+        # This is where we dump our result storage up to Amazon, if we are able
+        # to.
+        if self.store_results is False: return
+        import boto
+        from boto.s3.key import Key
+        c = boto.connect_s3()
+        bucket = c.get_bucket("yt-answer-tests")
+        for pf_name in self.result_storage:
+            rs = cPickle.dumps(self.result_storage[pf_name])
+            tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name)) 
+            if tk is not None: tk.delete()
+            k = Key(bucket)
+            k.key = "%s_%s" % (self.answer_name, pf_name)
+            k.set_contents_from_string(rs)
+            k.set_acl("public-read")
+
+class AnswerTestOpener(object):
+    def __init__(self, reference_name):
+        self.reference_name = reference_name
+        self.cache = {}
+
+    def get(self, pf_name, default = None):
+        if pf_name in self.cache: return self.cache[pf_name]
+        url = _url_path % (self.reference_name, pf_name)
+        try:
+            resp = urllib2.urlopen(url)
+            # This is dangerous, but we have a controlled S3 environment
+            data = resp.read()
+            rv = cPickle.loads(data)
+        except urllib2.HTTPError as ex:
+            # log what was missing before bailing out
+            mylog.warning("Missing %s (%s)", url, ex)
+            raise YTNoOldAnswer(url)
+        self.cache[pf_name] = rv
+        return rv
+
+@contextlib.contextmanager
+def temp_cwd(cwd):
+    oldcwd = os.getcwd()
+    os.chdir(cwd)
+    try:
+        yield
+    finally:
+        # restore the old working directory even if the body raises
+        os.chdir(oldcwd)
+
+def can_run_pf(pf_fn):
+    path = ytcfg.get("yt", "test_data_dir")
+    with temp_cwd(path):
+        try:
+            load(pf_fn)
+        except:
+            return False
+    return AnswerTestingTest.result_storage is not None
+
+def data_dir_load(pf_fn):
+    path = ytcfg.get("yt", "test_data_dir")
+    with temp_cwd(path):
+        pf = load(pf_fn)
+        pf.h
+        return pf
+
+class AnswerTestingTest(object):
+    reference_storage = None
+    def __init__(self, pf_fn):
+        self.pf = data_dir_load(pf_fn)
+
+    def __call__(self):
+        nv = self.run()
+        if self.reference_storage is not None:
+            dd = self.reference_storage.get(str(self.pf))
+            if dd is None: raise YTNoOldAnswer(str(self.pf))
+            ov = dd[self.description]
+            self.compare(nv, ov)
+        else:
+            ov = None
+        self.result_storage[str(self.pf)][self.description] = nv
+
+    def compare(self, new_result, old_result):
+        raise RuntimeError
+
+    def create_obj(self, pf, obj_type):
+        # obj_type should be tuple of
+        #  ( obj_name, ( args ) )
+        if obj_type is None:
+            return pf.h.all_data()
+        cls = getattr(pf.h, obj_type[0])
+        obj = cls(*obj_type[1])
+        return obj
+
+    @property
+    def sim_center(self):
+        """
+        This returns the center of the domain.
+        """
+        return 0.5*(self.pf.domain_right_edge + self.pf.domain_left_edge)
+
+    @property
+    def max_dens_location(self):
+        """
+        This is a helper function to return the location of the most dense
+        point.
+        """
+        return self.pf.h.find_max("Density")[1]
+
+    @property
+    def entire_simulation(self):
+        """
+        Return an unsorted array of values that cover the entire domain.
+        """
+        return self.pf.h.all_data()
+
+    @property
+    def description(self):
+        obj_type = getattr(self, "obj_type", None)
+        if obj_type is None:
+            oname = "all"
+        else:
+            oname = "_".join((str(s) for s in obj_type))
+        args = [self._type_name, str(self.pf), oname]
+        args += [str(getattr(self, an)) for an in self._attrs]
+        return "_".join(args)
+        
+class FieldValuesTest(AnswerTestingTest):
+    _type_name = "FieldValues"
+    _attrs = ("field", )
+
+    def __init__(self, pf_fn, field, obj_type = None):
+        super(FieldValuesTest, self).__init__(pf_fn)
+        self.obj_type = obj_type
+        self.field = field
+
+    def run(self):
+        obj = self.create_obj(self.pf, self.obj_type)
+        avg = obj.quantities["WeightedAverageQuantity"](self.field,
+                             weight="Ones")
+        (mi, ma), = obj.quantities["Extrema"](self.field)
+        return np.array([avg, mi, ma])
+
+    def compare(self, new_result, old_result):
+        assert_equal(new_result, old_result)
+
+class ProjectionValuesTest(AnswerTestingTest):
+    _type_name = "ProjectionValues"
+    _attrs = ("field", "axis", "weight_field")
+
+    def __init__(self, pf_fn, axis, field, weight_field = None,
+                 obj_type = None):
+        super(ProjectionValuesTest, self).__init__(pf_fn)
+        self.axis = axis
+        self.field = field
+        self.weight_field = weight_field
+        self.obj_type = obj_type
+
+    def run(self):
+        if self.obj_type is not None:
+            obj = self.create_obj(self.pf, self.obj_type)
+        else:
+            obj = None
+        proj = self.pf.h.proj(self.axis, self.field,
+                              weight_field=self.weight_field,
+                              data_source = obj)
+        return proj.field_data
+
+    def compare(self, new_result, old_result):
+        assert(len(new_result) == len(old_result))
+        for k in new_result:
+            assert (k in old_result)
+        for k in new_result:
+            assert_equal(new_result[k], old_result[k])
+
+class PixelizedProjectionValuesTest(AnswerTestingTest):
+    _type_name = "PixelizedProjectionValues"
+    _attrs = ("field", "axis", "weight_field")
+
+    def __init__(self, pf_fn, axis, field, weight_field = None,
+                 obj_type = None):
+        super(PixelizedProjectionValuesTest, self).__init__(pf_fn)
+        self.axis = axis
+        self.field = field
+        self.weight_field = weight_field
+        self.obj_type = obj_type
+
+    def run(self):
+        if self.obj_type is not None:
+            obj = self.create_obj(self.pf, self.obj_type)
+        else:
+            obj = None
+        proj = self.pf.h.proj(self.axis, self.field,
+                              weight_field=self.weight_field,
+                              data_source = obj)
+        frb = proj.to_frb((1.0, 'unitary'), 256)
+        frb[self.field]
+        frb[self.weight_field]
+        d = frb.data
+        d.update( dict( (("%s_sum" % f, proj[f].sum(dtype="float64"))
+                         for f in proj.field_data.keys()) ) )
+        return d
+
+    def compare(self, new_result, old_result):
+        assert(len(new_result) == len(old_result))
+        for k in new_result:
+            assert (k in old_result)
+        for k in new_result:
+            assert_rel_equal(new_result[k], old_result[k], 10)
+
+class GridValuesTest(AnswerTestingTest):
+    _type_name = "GridValues"
+    _attrs = ("field",)
+
+    def __init__(self, pf_fn, field):
+        super(GridValuesTest, self).__init__(pf_fn)
+        self.field = field
+
+    def run(self):
+        hashes = {}
+        for g in self.pf.h.grids:
+            hashes[g.id] = hashlib.md5(g[self.field].tostring()).hexdigest()
+            g.clear_data()
+        return hashes
+
+    def compare(self, new_result, old_result):
+        assert(len(new_result) == len(old_result))
+        for k in new_result:
+            assert (k in old_result)
+        for k in new_result:
+            assert_equal(new_result[k], old_result[k])
+
+class GridHierarchyTest(AnswerTestingTest):
+    _type_name = "GridHierarchy"
+    _attrs = ()
+
+    def run(self):
+        result = {}
+        result["grid_dimensions"] = self.pf.h.grid_dimensions
+        result["grid_left_edges"] = self.pf.h.grid_left_edge
+        result["grid_right_edges"] = self.pf.h.grid_right_edge
+        result["grid_levels"] = self.pf.h.grid_levels
+        result["grid_particle_count"] = self.pf.h.grid_particle_count
+        return result
+
+    def compare(self, new_result, old_result):
+        for k in new_result:
+            assert_equal(new_result[k], old_result[k])
+
+class ParentageRelationshipsTest(AnswerTestingTest):
+    _type_name = "ParentageRelationships"
+    _attrs = ()
+    def run(self):
+        result = {}
+        result["parents"] = []
+        result["children"] = []
+        for g in self.pf.h.grids:
+            p = g.Parent
+            if p is None:
+                result["parents"].append(None)
+            elif hasattr(p, "id"):
+                result["parents"].append(p.id)
+            else:
+                result["parents"].append([pg.id for pg in p])
+            result["children"].append([c.id for c in g.Children])
+        return result
+
+    def compare(self, new_result, old_result):
+        for newp, oldp in zip(new_result["parents"], old_result["parents"]):
+            assert(newp == oldp)
+        for newc, oldc in zip(new_result["children"], old_result["children"]):
+            assert(newc == oldc)
+
+def requires_pf(pf_fn, big_data = False):
+    def ffalse(func):
+        return lambda: None
+    def ftrue(func):
+        return func
+    if big_data and not run_big_data:
+        return ffalse
+    elif not can_run_pf(pf_fn):
+        return ffalse
+    else:
+        return ftrue
+
+def small_patch_amr(pf_fn, fields):
+    if not can_run_pf(pf_fn): return
+    dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
+    yield GridHierarchyTest(pf_fn)
+    yield ParentageRelationshipsTest(pf_fn)
+    for field in fields:
+        yield GridValuesTest(pf_fn, field)
+        for axis in [0, 1, 2]:
+            for ds in dso:
+                for weight_field in [None, "Density"]:
+                    yield ProjectionValuesTest(
+                        pf_fn, axis, field, weight_field,
+                        ds)
+                yield FieldValuesTest(
+                        pf_fn, field, ds)
+
+def big_patch_amr(pf_fn, fields):
+    if not can_run_pf(pf_fn): return
+    dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
+    yield GridHierarchyTest(pf_fn)
+    yield ParentageRelationshipsTest(pf_fn)
+    for field in fields:
+        yield GridValuesTest(pf_fn, field)
+        for axis in [0, 1, 2]:
+            for ds in dso:
+                for weight_field in [None, "Density"]:
+                    yield PixelizedProjectionValuesTest(
+                        pf_fn, axis, field, weight_field,
+                        ds)


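A minimal sketch of a frontend test module built on this framework, modeled on the Enzo tests added earlier in this changeset; the --with-answer-testing switch is assumed to be nose's auto-generated enable flag for a plugin named "answer-testing":

    from yt.utilities.answer_testing.framework import \
        requires_pf, small_patch_amr

    m7 = "DD0010/moving7_0010"  # resolved relative to ytcfg's test_data_dir

    @requires_pf(m7)
    def test_moving7():
        for test in small_patch_amr(m7, ("Density",)):
            yield test

Store a fresh answer set with

    nosetests --with-answer-testing --answer-store --answer-name=mine

and compare against the gold standard with

    nosetests --with-answer-testing --answer-compare=gold001
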
diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/utilities/answer_testing/output_tests.py
--- a/yt/utilities/answer_testing/output_tests.py
+++ b/yt/utilities/answer_testing/output_tests.py
@@ -29,14 +29,12 @@
 # We first create our dictionary of tests to run.  This starts out empty, and
 # as tests are imported it will be filled.
 if "TestRegistry" not in locals():
-    print "Initializing TestRegistry"
     class TestRegistry(dict):
         def __new__(cls, *p, **k):
             if not '_the_instance' in cls.__dict__:
                 cls._the_instance = dict.__new__(cls)
                 return cls._the_instance
 if "test_registry" not in locals():
-    print "Initializing test_registry"
     test_registry = TestRegistry()
 
 # The exceptions we raise, related to the character of the failure.


diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -1095,8 +1095,12 @@
                   )
         else:
             from IPython.config.loader import Config
+            import sys
             cfg = Config()
+            # prepend sys.path with current working directory
+            sys.path.insert(0,'')
             IPython.embed(config=cfg,user_ns=local_ns)
+            
 
 class YTMapserverCmd(YTCommand):
     args = ("proj", "field", "weight",


diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/utilities/decompose.py
--- a/yt/utilities/decompose.py
+++ b/yt/utilities/decompose.py
@@ -68,9 +68,12 @@
 def evaluate_domain_decomposition(n_d, pieces, ldom):
     """ Evaluate longest to shortest edge ratio
         BEWARE: lot's of magic here """
-    ideal_bsize = 3.0 * (pieces * np.product(n_d) ** 2) ** (1.0 / 3.0)
-    bsize = int(np.sum(
-        ldom / np.array(n_d, dtype=np.float64) * np.product(n_d)))
+    eff_dim = (n_d > 1).sum()
+    ideal_bsize = eff_dim * (pieces * np.product(n_d) ** (eff_dim - 1)
+                             ) ** (1.0 / eff_dim)
+    mask = np.where(n_d > 1)
+    nd_arr = np.array(n_d, dtype=np.float64)[mask]
+    bsize = int(np.sum(ldom[mask] / nd_arr * np.product(nd_arr)))
     load_balance = float(np.product(n_d)) / \
         (float(pieces) * np.product((n_d - 1) / ldom + 1))
 
@@ -134,23 +137,15 @@
 
 
 def split_array(tab, psize):
-    """ Split array into px*py*pz subarrays using internal numpy routine. """
-    temp = [np.array_split(array, psize[1], axis=1)
-            for array in np.array_split(tab, psize[2], axis=2)]
-    temp = [item for sublist in temp for item in sublist]
-    temp = [np.array_split(array, psize[0], axis=0) for array in temp]
-    temp = [item for sublist in temp for item in sublist]
-    return temp
-
-
-if __name__ == "__main__":
-
-    NPROC = 12
-    ARRAY = np.zeros((128, 128, 129))
-    BBOX = np.array([[0., 1.0], [-1.5, 1.5], [1.0, 2.5]])
-
-    PROCS = get_psize(np.array(ARRAY.shape), NPROC)
-    LE, RE, DATA = decompose_array(ARRAY, PROCS, BBOX)
-
-    for idx in range(NPROC):
-        print LE[idx, :], RE[idx, :], DATA[idx].shape
+    """ Split array into px*py*pz subarrays. """
+    n_d = np.array(tab.shape, dtype=np.int64)
+    slices = []
+    for i in range(psize[0]):
+        for j in range(psize[1]):
+            for k in range(psize[2]):
+                piece = np.array((i, j, k), dtype=np.int64)
+                lei = n_d * piece / psize
+                rei = n_d * (piece + np.ones(3, dtype=np.int64)) / psize
+                slices.append(np.s_[lei[0]:rei[0], lei[1]:
+                                    rei[1], lei[2]:rei[2]])
+    return [tab[slc] for slc in slices]


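The deleted __main__ demo above still describes how these pieces fit together; recast as a usage sketch for the new index-based split_array:

    import numpy as np
    from yt.utilities.decompose import get_psize, split_array

    arr = np.zeros((128, 128, 129))
    psize = get_psize(np.array(arr.shape), 12)  # split into 12 pieces
    pieces = split_array(arr, psize)
    # every cell lands in exactly one piece
    assert sum(p.size for p in pieces) == arr.size
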
diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -146,3 +146,29 @@
     def __str__(self):
         return "You must create an API key before uploading.  See " + \
                "https://data.yt-project.org/getting_started.html"
+
+class YTNoFilenamesMatchPattern(YTException):
+    def __init__(self, pattern):
+        self.pattern = pattern
+
+    def __str__(self):
+        return "No filenames were found to match the pattern: " + \
+               "'%s'" % (self.pattern)
+
+class YTNoOldAnswer(YTException):
+    def __init__(self, path):
+        self.path = path
+
+    def __str__(self):
+        return "There is no old answer available.\n" + \
+               str(self.path)
+
+class YTEllipsoidOrdering(YTException):
+    def __init__(self, pf, A, B, C):
+        YTException.__init__(self, pf)
+        self._A = A
+        self._B = B
+        self._C = C
+
+    def __str__(self):
+        return "Must have A>=B>=C"


diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/utilities/kdtree/__init__.py
--- a/yt/utilities/kdtree/__init__.py
+++ b/yt/utilities/kdtree/__init__.py
@@ -1,1 +0,0 @@
-from fKDpy import *


diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/utilities/kdtree/api.py
--- /dev/null
+++ b/yt/utilities/kdtree/api.py
@@ -0,0 +1,9 @@
+from fKDpy import \
+    chainHOP_tags_dens, \
+    create_tree, \
+    fKD, \
+    find_nn_nearest_neighbors, \
+    free_tree, \
+    find_chunk_nearest_neighbors
+
+


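With the star import gone from yt/utilities/kdtree/__init__.py, consumers are expected to pull the Forthon kD-tree names in explicitly (assuming the fKDpy extension has been built), e.g.:

    from yt.utilities.kdtree.api import fKD, create_tree, \
        find_nn_nearest_neighbors, free_tree
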
diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/utilities/kdtree/test.py
--- a/yt/utilities/kdtree/test.py
+++ /dev/null
@@ -1,58 +0,0 @@
-from Forthon import *
-from fKDpy import *
-import numpy,random
-
-n = 32768
-
-
-fKD.tags = fzeros((64),'i')
-fKD.dist = fzeros((64),'d')
-fKD.pos = fzeros((3,n),'d')
-fKD.nn = 64
-fKD.nparts = n
-fKD.sort = True
-fKD.rearrange = True
-fKD.qv = numpy.array([16./32, 16./32, 16./32])
-
-fp = open('parts.txt','r')
-xpos = []
-ypos = []
-zpos = []
-line = fp.readline()
-while line:
-    line = line.split()
-    xpos.append(float(line[0]))
-    ypos.append(float(line[1]))
-    zpos.append(float(line[2]))
-    line= fp.readline()
-
-fp.close()
-
-
-for k in range(32):
-    for j in range(32):
-        for i in range(32):
-            fKD.pos[0][i + j*32 + k*1024] = float(i)/32 + 1./64 + 0.0001*random.random()
-            fKD.pos[1][i + j*32 + k*1024] = float(j)/32 + 1./64 + 0.0001*random.random()
-            fKD.pos[2][i + j*32 + k*1024] = float(k)/32 + 1./64 + 0.0001*random.random()
-
-            
-
-#print fKD.pos[0][0],fKD.pos[1][0],fKD.pos[2][0]
-
-create_tree()
-
-
-find_nn_nearest_neighbors()
-
-#print 'next'
-
-#fKD.qv = numpy.array([0., 0., 0.])
-
-#find_nn_nearest_neighbors()
-
-
-#print (fKD.tags - 1)
-#print fKD.dist
-
-free_tree()


diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/utilities/lib/geometry_utils.pyx
--- a/yt/utilities/lib/geometry_utils.pyx
+++ b/yt/utilities/lib/geometry_utils.pyx
@@ -338,3 +338,47 @@
                     rg[2,i,j,k] = zg[i,j,k] - c[2]
         return rg
 
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def obtain_rv_vec(data):
+    # This is just to let the pointers exist and whatnot.  We can't cdef them
+    # inside conditionals.
+    cdef np.ndarray[np.float64_t, ndim=1] vxf
+    cdef np.ndarray[np.float64_t, ndim=1] vyf
+    cdef np.ndarray[np.float64_t, ndim=1] vzf
+    cdef np.ndarray[np.float64_t, ndim=2] rvf
+    cdef np.ndarray[np.float64_t, ndim=3] vxg
+    cdef np.ndarray[np.float64_t, ndim=3] vyg
+    cdef np.ndarray[np.float64_t, ndim=3] vzg
+    cdef np.ndarray[np.float64_t, ndim=4] rvg
+    cdef np.float64_t bv[3]
+    cdef int i, j, k
+    bulk_velocity = data.get_field_parameter("bulk_velocity")
+    if bulk_velocity is None:
+        bulk_velocity = np.zeros(3)
+    bv[0] = bulk_velocity[0]; bv[1] = bulk_velocity[1]; bv[2] = bulk_velocity[2]
+    if len(data['x-velocity'].shape) == 1:
+        # One dimensional data
+        vxf = data['x-velocity'].astype("float64")
+        vyf = data['y-velocity'].astype("float64")
+        vzf = data['z-velocity'].astype("float64")
+        rvf = np.empty((3, vxf.shape[0]), 'float64')
+        for i in range(vxf.shape[0]):
+            rvf[0, i] = vxf[i] - bv[0]
+            rvf[1, i] = vyf[i] - bv[1]
+            rvf[2, i] = vzf[i] - bv[2]
+        return rvf
+    else:
+        # Three dimensional data
+        vxg = data['x-velocity'].astype("float64")
+        vyg = data['y-velocity'].astype("float64")
+        vzg = data['z-velocity'].astype("float64")
+        rvg = np.empty((3, vxg.shape[0], vxg.shape[1], vxg.shape[2]), 'float64')
+        for i in range(vxg.shape[0]):
+            for j in range(vxg.shape[1]):
+                for k in range(vxg.shape[2]):
+                    rvg[0,i,j,k] = vxg[i,j,k] - bv[0]
+                    rvg[1,i,j,k] = vyg[i,j,k] - bv[1]
+                    rvg[2,i,j,k] = vzg[i,j,k] - bv[2]
+        return rvg


diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -233,49 +233,6 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-def obtain_rvec(data):
-    # This is just to let the pointers exist and whatnot.  We can't cdef them
-    # inside conditionals.
-    cdef np.ndarray[np.float64_t, ndim=1] xf
-    cdef np.ndarray[np.float64_t, ndim=1] yf
-    cdef np.ndarray[np.float64_t, ndim=1] zf
-    cdef np.ndarray[np.float64_t, ndim=2] rf
-    cdef np.ndarray[np.float64_t, ndim=3] xg
-    cdef np.ndarray[np.float64_t, ndim=3] yg
-    cdef np.ndarray[np.float64_t, ndim=3] zg
-    cdef np.ndarray[np.float64_t, ndim=4] rg
-    cdef np.float64_t c[3]
-    cdef int i, j, k
-    center = data.get_field_parameter("center")
-    c[0] = center[0]; c[1] = center[1]; c[2] = center[2]
-    if len(data['x'].shape) == 1:
-        # One dimensional data
-        xf = data['x']
-        yf = data['y']
-        zf = data['z']
-        rf = np.empty((3, xf.shape[0]), 'float64')
-        for i in range(xf.shape[0]):
-            rf[0, i] = xf[i] - c[0]
-            rf[1, i] = yf[i] - c[1]
-            rf[2, i] = zf[i] - c[2]
-        return rf
-    else:
-        # Three dimensional data
-        xg = data['x']
-        yg = data['y']
-        zg = data['z']
-        rg = np.empty((3, xg.shape[0], xg.shape[1], xg.shape[2]), 'float64')
-        for i in range(xg.shape[0]):
-            for j in range(xg.shape[1]):
-                for k in range(xg.shape[2]):
-                    rg[0,i,j,k] = xg[i,j,k] - c[0]
-                    rg[1,i,j,k] = yg[i,j,k] - c[1]
-                    rg[2,i,j,k] = zg[i,j,k] - c[2]
-        return rg
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
 def kdtree_get_choices(np.ndarray[np.float64_t, ndim=3] data,
                        np.ndarray[np.float64_t, ndim=1] l_corner,
                        np.ndarray[np.float64_t, ndim=1] r_corner):


diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/utilities/lib/tests/test_geometry_utils.py
--- /dev/null
+++ b/yt/utilities/lib/tests/test_geometry_utils.py
@@ -0,0 +1,30 @@
+from yt.testing import *
+from yt.utilities.lib import obtain_rvec, obtain_rv_vec
+
+_fields = ("Density", "x-velocity", "y-velocity", "z-velocity")
+
+def test_obtain_rvec():
+    pf = fake_random_pf(64, nprocs=8, fields=_fields, 
+           negative = [False, True, True, True])
+    
+    dd = pf.h.sphere((0.5,0.5,0.5), 0.2)
+
+    coords = obtain_rvec(dd)
+
+    r = np.sqrt(np.sum(coords*coords,axis=0))
+
+    assert_array_less(r.max(), 0.2)
+
+    assert_array_less(0.0, r.min())
+
+def test_obtain_rv_vec():
+    pf = fake_random_pf(64, nprocs=8, fields=_fields, 
+           negative = [False, True, True, True])
+
+    dd = pf.h.all_data()
+
+    vels = obtain_rv_vec(dd)
+
+    assert_array_equal(vels[0,:], dd['x-velocity'])
+    assert_array_equal(vels[1,:], dd['y-velocity'])
+    assert_array_equal(vels[2,:], dd['z-velocity'])


diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -674,3 +674,191 @@
                   [uz*ux*(1-cost)-uy*sint, uz*uy*(1-cost)+ux*sint, cost+uz**2*(1-cost)]])
     
     return R
+
+def get_ortho_basis(normal):
+    xprime = np.cross([0.0,1.0,0.0],normal)
+    # fall back only when the cross product actually vanishes, i.e. when
+    # normal is (anti)parallel to y-hat; summing the components can be
+    # zero for non-degenerate vectors
+    if np.allclose(xprime, 0.0): xprime = np.array([0.0, 0.0, 1.0])
+    yprime = np.cross(normal,xprime)
+    zprime = normal
+    return (xprime, yprime, zprime)
+
+def get_sph_r(coords):
+    # The spherical coordinates radius is simply the magnitude of the
+    # coordinate vector.
+
+    return np.sqrt(np.sum(coords**2, axis=0))
+
+def resize_vector(vector,vector_array):
+    if len(vector_array.shape) == 4:
+        res_vector = np.resize(vector,(3,1,1,1))
+    else:
+        res_vector = np.resize(vector,(3,1))
+    return res_vector
+
+def get_sph_theta(coords, normal):
+    # The angle (theta) with respect to the normal (J) is the arccos
+    # of the dot product of the normal with the normalized coordinate
+    # vector.
+    
+    res_normal = resize_vector(normal, coords)
+
+    tile_shape = [1] + list(coords.shape)[1:]
+    
+    J = np.tile(res_normal,tile_shape)
+
+    JdotCoords = np.sum(J*coords,axis=0)
+    
+    return np.arccos( JdotCoords / np.sqrt(np.sum(coords**2,axis=0)) )
+
+def get_sph_phi(coords, normal):
+    # We have freedom with respect to what axis (xprime) to define
+    # the disk angle. Here I've chosen to use the axis that is
+    # perpendicular to the normal and the y-axis. When normal ==
+    # y-hat, then set xprime = z-hat. With this definition, when
+    # normal == z-hat (as is typical), then xprime == x-hat.
+    #
+    # The angle is then given by the arctan of the ratio of the
+    # yprime-component and the xprime-component of the coordinate 
+    # vector.
+
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_xprime = resize_vector(xprime, coords)
+    res_yprime = resize_vector(yprime, coords)
+
+    tile_shape = [1] + list(coords.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+
+    Px = np.sum(Jx*coords,axis=0)
+    Py = np.sum(Jy*coords,axis=0)
+    
+    return np.arctan2(Py,Px)
+
+def get_cyl_r(coords, normal):
+    # The cross product of the normal (J) with a coordinate vector
+    # gives a vector of magnitude equal to the cylindrical radius.
+
+    res_normal = resize_vector(normal, coords)
+
+    tile_shape = [1] + list(coords.shape)[1:]
+    J = np.tile(res_normal, tile_shape)
+    
+    JcrossCoords = np.cross(J, coords, axisa=0, axisb=0, axisc=0)
+    return np.sqrt(np.sum(JcrossCoords**2, axis=0))
+
+def get_cyl_z(coords, normal):
+    # The dot product of the normal (J) with the coordinate vector 
+    # gives the cylindrical height.
+
+    res_normal = resize_vector(normal, coords)
+    
+    tile_shape = [1] + list(coords.shape)[1:]
+    J = np.tile(res_normal, tile_shape)
+
+    return np.sum(J*coords, axis=0)  
+
+def get_cyl_theta(coords, normal):
+    # This is identical to the spherical phi component
+
+    return get_sph_phi(coords, normal)
+
+
+def get_cyl_r_component(vectors, theta, normal):
+    # The r component of a vector is the vector dotted with rhat
+
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+
+    rhat = Jx*np.cos(theta) + Jy*np.sin(theta)
+
+    return np.sum(vectors*rhat,axis=0)
+
+def get_cyl_theta_component(vectors, theta, normal):
+    # The theta component of a vector is the vector dotted with thetahat
+    
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+
+    thetahat = -Jx*np.sin(theta) + Jy*np.cos(theta)
+
+    return np.sum(vectors*thetahat, axis=0)
+
+def get_cyl_z_component(vectors, normal):
+    # The z component of a vector is the vector dotted with zhat
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_zprime = resize_vector(zprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    zhat = np.tile(res_zprime, tile_shape)
+
+    return np.sum(vectors*zhat, axis=0)
+
+def get_sph_r_component(vectors, theta, phi, normal):
+    # The r component of a vector is the vector dotted with rhat
+    
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+    res_zprime = resize_vector(zprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+    Jz = np.tile(res_zprime,tile_shape)
+
+    rhat = Jx*np.sin(theta)*np.cos(phi) + \
+           Jy*np.sin(theta)*np.sin(phi) + \
+           Jz*np.cos(theta)
+
+    return np.sum(vectors*rhat, axis=0)
+
+def get_sph_phi_component(vectors, phi, normal):
+    # The phi component of a vector is the vector dotted with phihat
+
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+
+    phihat = -Jx*np.sin(phi) + Jy*np.cos(phi)
+
+    return np.sum(vectors*phihat, axis=0)
+
+def get_sph_theta_component(vectors, theta, phi, normal):
+    # The theta component of a vector is the vector dotted with thetahat
+    
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+    res_zprime = resize_vector(zprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+    Jz = np.tile(res_zprime,tile_shape)
+    
+    thetahat = Jx*np.cos(theta)*np.cos(phi) + \
+               Jy*np.cos(theta)*np.sin(phi) - \
+               Jz*np.sin(theta)
+
+    return np.sum(vectors*thetahat, axis=0)
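
For orientation, a minimal sketch (not part of the commit) of how the new
helpers above compose for the common normal = z-hat case; the identities
follow directly from the comments in the diff:

    import numpy as np
    from yt.utilities.math_utils import get_sph_r, get_sph_theta, get_sph_phi

    # A (3, N) array of coordinate vectors, as the helpers expect.
    coords = np.array([[0.3, -0.1],   # x
                       [0.4,  0.2],   # y
                       [0.5, -0.9]])  # z
    normal = [0.0, 0.0, 1.0]

    r = get_sph_r(coords)                  # sqrt(x**2 + y**2 + z**2)
    theta = get_sph_theta(coords, normal)  # arccos(z / r) when normal is z-hat
    phi = get_sph_phi(coords, normal)      # arctan2(y, x) when normal is z-hat

    assert np.allclose(r, np.sqrt((coords ** 2).sum(axis=0)))
    assert np.allclose(theta, np.arccos(coords[2] / r))
    assert np.allclose(phi, np.arctan2(coords[1], coords[0]))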


diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/utilities/tests/test_coordinate_conversions.py
--- /dev/null
+++ b/yt/utilities/tests/test_coordinate_conversions.py
@@ -0,0 +1,125 @@
+from yt.testing import *
+from yt.utilities.math_utils import \
+    get_sph_r_component, \
+    get_sph_theta_component, \
+    get_sph_phi_component, \
+    get_cyl_r_component, \
+    get_cyl_z_component, \
+    get_cyl_theta_component, \
+    get_cyl_r, get_cyl_theta, \
+    get_cyl_z, get_sph_r, \
+    get_sph_theta, get_sph_phi
+
+# Randomly generated coordinates in the domain [[-1,1],[-1,1],[-1,1]]
+coords = np.array([[-0.41503037, -0.22102472, -0.55774212],
+                   [ 0.73828247, -0.17913899,  0.64076921],
+                   [ 0.08922066, -0.94254844, -0.61774511],
+                   [ 0.10173242, -0.95789145,  0.16294352],
+                   [ 0.73186508, -0.3109153 ,  0.75728738],
+                   [ 0.8757989 , -0.41475119, -0.57039201],
+                   [ 0.58040762,  0.81969082,  0.46759728],
+                   [-0.89983356, -0.9853683 , -0.38355343]]).T
+
+def test_spherical_coordinate_conversion():
+    normal = [0, 0, 1]
+    real_r =     [ 0.72950559,  0.99384957,  1.13047198,  0.97696269,  
+                   1.09807968,  1.12445067,  1.10788685,  1.38843954]
+    real_theta = [ 2.44113629,  0.87012028,  2.14891444,  1.4032274 ,  
+                   0.80979483,  2.10280198,  1.13507735,  1.85068416]
+    real_phi =   [-2.65224483, -0.23804243, -1.47641858, -1.46498842, 
+                  -0.40172325, -0.4422801 ,  0.95466734, -2.31085392]
+
+    calc_r = get_sph_r(coords)
+    calc_theta = get_sph_theta(coords, normal)
+    calc_phi = get_sph_phi(coords, normal)
+
+    assert_array_almost_equal(calc_r, real_r)
+    assert_array_almost_equal(calc_theta, real_theta)
+    assert_array_almost_equal(calc_phi, real_phi)
+
+    normal = [1, 0, 0]
+    real_theta = [ 2.17598842,  0.73347681,  1.49179079,  1.46647589,  
+                   0.8412984 ,  0.67793705,  1.0193883 ,  2.27586987]
+    real_phi =   [-0.37729951, -2.86898397, -0.99063518, -1.73928995, 
+                   -2.75201227,-0.62870527,  2.08920872, -1.19959244]
+
+    calc_theta = get_sph_theta(coords, normal)
+    calc_phi = get_sph_phi(coords, normal)
+    
+    assert_array_almost_equal(calc_theta, real_theta)
+    assert_array_almost_equal(calc_phi, real_phi)
+
+def test_cylindrical_coordinate_conversion():
+    normal = [0, 0, 1]
+    real_r =     [ 0.47021498,  0.75970506,  0.94676179,  0.96327853,  
+                   0.79516968,  0.96904193,  1.00437346,  1.3344104 ]    
+    real_theta = [-2.65224483, -0.23804243, -1.47641858, -1.46498842, 
+                  -0.40172325, -0.4422801 ,  0.95466734, -2.31085392]
+    real_z =     [-0.55774212,  0.64076921, -0.61774511,  0.16294352,
+                   0.75728738, -0.57039201,  0.46759728, -0.38355343]
+
+    calc_r = get_cyl_r(coords, normal)
+    calc_theta = get_cyl_theta(coords, normal)
+    calc_z = get_cyl_z(coords, normal)
+
+    assert_array_almost_equal(calc_r, real_r)
+    assert_array_almost_equal(calc_theta, real_theta)
+    assert_array_almost_equal(calc_z, real_z)
+
+    normal = [1, 0, 0]
+    real_r =     [ 0.59994016,  0.66533898,  1.12694569,  0.97165149,
+                   0.81862843,  0.70524152,  0.94368441,  1.05738542]
+    real_theta = [-0.37729951, -2.86898397, -0.99063518, -1.73928995, 
+                  -2.75201227, -0.62870527,  2.08920872, -1.19959244]
+    real_z =     [-0.41503037,  0.73828247,  0.08922066,  0.10173242,
+                   0.73186508,  0.8757989 ,  0.58040762, -0.89983356]
+
+    calc_r = get_cyl_r(coords, normal)
+    calc_theta = get_cyl_theta(coords, normal)
+    calc_z = get_cyl_z(coords, normal)
+
+    assert_array_almost_equal(calc_r, real_r)
+    assert_array_almost_equal(calc_theta, real_theta)
+    assert_array_almost_equal(calc_z, real_z)
+
+def test_spherical_coordinate_projections():
+    normal = [0, 0, 1]
+    theta = get_sph_theta(coords, normal)
+    phi = get_sph_phi(coords, normal)
+    zero = np.tile(0,coords.shape[1])
+
+    # Purely radial field
+    vecs = np.array([np.sin(theta)*np.cos(phi), np.sin(theta)*np.sin(phi), np.cos(theta)])
+    assert_array_almost_equal(zero, get_sph_theta_component(vecs, theta, phi, normal))
+    assert_array_almost_equal(zero, get_sph_phi_component(vecs, phi, normal))
+
+    # Purely toroidal field
+    vecs = np.array([-np.sin(phi), np.cos(phi), zero])
+    assert_array_almost_equal(zero, get_sph_theta_component(vecs, theta, phi, normal))
+    assert_array_almost_equal(zero, get_sph_r_component(vecs, theta, phi, normal))
+
+    # Purely poloidal field
+    vecs = np.array([np.cos(theta)*np.cos(phi), np.cos(theta)*np.sin(phi), -np.sin(theta)])
+    assert_array_almost_equal(zero, get_sph_phi_component(vecs, phi, normal))
+    assert_array_almost_equal(zero, get_sph_r_component(vecs, theta, phi, normal))
+
+def test_cylindrical_coordinate_projections():
+    normal = [0, 0, 1]
+    theta = get_cyl_theta(coords, normal)
+    z = get_cyl_z(coords, normal)
+    zero = np.tile(0, coords.shape[1])
+
+    # Purely radial field
+    vecs = np.array([np.cos(theta), np.sin(theta), zero])
+    assert_array_almost_equal(zero, get_cyl_theta_component(vecs, theta, normal))
+    assert_array_almost_equal(zero, get_cyl_z_component(vecs, normal))
+
+    # Purely toroidal field
+    vecs = np.array([-np.sin(theta), np.cos(theta), zero])
+    assert_array_almost_equal(zero, get_cyl_z_component(vecs, normal))
+    assert_array_almost_equal(zero, get_cyl_r_component(vecs, theta, normal))
+
+    # Purely z field
+    vecs = np.array([zero, zero, z])
+    assert_array_almost_equal(zero, get_cyl_theta_component(vecs, theta, normal))
+    assert_array_almost_equal(zero, get_cyl_r_component(vecs, theta, normal))


diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/utilities/tests/test_decompose.py
--- /dev/null
+++ b/yt/utilities/tests/test_decompose.py
@@ -0,0 +1,96 @@
+"""
+Test suite for cartesian domain decomposition.
+
+Author: Kacper Kowalik <xarthisius.kk at gmail.com>
+Affiliation: CA UMK
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Kacper Kowalik. All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.testing import assert_array_equal, assert_almost_equal
+import numpy as np
+import yt.utilities.decompose as dec
+
+
+def setup():
+    pass
+
+
+def test_psize_2d():
+    procs = dec.get_psize(np.array([5, 1, 7]), 6)
+    assert_array_equal(procs, np.array([3, 1, 2]))
+    procs = dec.get_psize(np.array([1, 7, 5]), 6)
+    assert_array_equal(procs, np.array([1, 2, 3]))
+    procs = dec.get_psize(np.array([7, 5, 1]), 6)
+    assert_array_equal(procs, np.array([2, 3, 1]))
+
+
+def test_psize_3d():
+    procs = dec.get_psize(np.array([33, 35, 37]), 12)
+    assert_array_equal(procs, np.array([3, 2, 2]))
+
+
+def test_decomposition_2d():
+    array = np.ones((7, 5, 1))
+    bbox = np.array([[-0.7, 0.0], [1.5, 2.0], [0.0, 0.7]])
+    ledge, redge, data = dec.decompose_array(array, np.array([2, 3, 1]), bbox)
+
+    assert_array_equal(data[1].shape, np.array([3, 2, 1]))
+
+    gold_le = np.array([
+                       [-0.7, 1.5, 0.0], [-0.7, 1.6, 0.0],
+                       [-0.7, 1.8, 0.0], [-0.4, 1.5, 0.0],
+                       [-0.4, 1.6, 0.0], [-0.4, 1.8, 0.0]
+                       ])
+    assert_almost_equal(ledge, gold_le, 8)
+
+    gold_re = np.array(
+        [[-0.4, 1.6, 0.7], [-0.4, 1.8, 0.7],
+         [-0.4, 2.0, 0.7], [0.0, 1.6, 0.7],
+         [0.0, 1.8, 0.7], [0.0, 2.0, 0.7]]
+    )
+    assert_almost_equal(redge, gold_re, 8)
+
+
+def test_decomposition_3d():
+    array = np.ones((33, 35, 37))
+    bbox = np.array([[0., 1.0], [-1.5, 1.5], [1.0, 2.5]])
+
+    ledge, redge, data = dec.decompose_array(array, np.array([3, 2, 2]), bbox)
+    assert_array_equal(data[0].shape, np.array([11, 17, 18]))
+
+    gold_le = np.array(
+        [[0.00000, -1.50000, 1.00000], [0.00000, -1.50000, 1.72973],
+         [0.00000, -0.04286, 1.00000], [0.00000, -0.04286, 1.72973],
+         [0.33333, -1.50000, 1.00000], [0.33333, -1.50000, 1.72973],
+         [0.33333, -0.04286, 1.00000], [0.33333, -0.04286, 1.72973],
+         [0.66667, -1.50000, 1.00000], [0.66667, -1.50000, 1.72973],
+         [0.66667, -0.04286, 1.00000], [0.66667, -0.04286, 1.72973]]
+    )
+    assert_almost_equal(ledge, gold_le, 5)
+
+    gold_re = np.array(
+        [[0.33333, -0.04286, 1.72973], [0.33333, -0.04286, 2.50000],
+         [0.33333, 1.50000, 1.72973], [0.33333, 1.50000, 2.50000],
+         [0.66667, -0.04286, 1.72973], [0.66667, -0.04286, 2.50000],
+         [0.66667, 1.50000, 1.72973], [0.66667, 1.50000, 2.50000],
+         [1.00000, -0.04286, 1.72973], [1.00000, -0.04286, 2.50000],
+         [1.00000, 1.50000, 1.72973], [1.00000, 1.50000, 2.50000]]
+    )
+    assert_almost_equal(redge, gold_re, 5)


diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/utilities/tests/test_kdtrees.py
--- /dev/null
+++ b/yt/utilities/tests/test_kdtrees.py
@@ -0,0 +1,93 @@
+"""
+Unit test the kD trees in yt.
+
+Author: Stephen Skory <s at skory.us>
+Affiliation: U of Colorado
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2008-2011 Stephen Skory.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.testing import *
+
+try:
+    from yt.utilities.kdtree.api import *
+except ImportError:
+    mylog.debug("The Fortran kD-Tree did not import correctly.")
+
+from yt.utilities.spatial import cKDTree
+
+def setup():
+    pass
+
+def test_fortran_tree():
+    r"""This test makes sure that the fortran kdtree is finding the correct
+    nearest neighbors.
+    """
+    # Four points.
+    try:
+        fKD.pos = np.empty((3, 4), dtype='float64', order='F')
+    except NameError:
+        return
+    # Make four points by hand that, in particular, will allow us to test
+    # the periodicity of the kdtree.
+    points = np.array([0.01, 0.5, 0.98, 0.99])
+    fKD.pos[0, :] = points
+    fKD.pos[1, :] = points
+    fKD.pos[2, :] = points
+    fKD.qv = np.empty(3, dtype='float64')
+    fKD.dist = np.empty(4, dtype='float64')
+    fKD.tags = np.empty(4, dtype='int64')
+    fKD.nn = 4
+    fKD.sort = True
+    create_tree(0)
+    # Now we check to make sure that we find the correct nearest neighbors,
+    # which get stored in dist and tags.
+    fKD.qv[:] = 0.999
+    find_nn_nearest_neighbors()
+    # Fix fortran counting.
+    fKD.tags -= 1
+    # Clean up before the tests.
+    free_tree(0)
+    # What the answers should be.
+    dist = np.array([2.43e-04, 3.63e-04, 1.083e-03, 7.47003e-01])
+    tags = np.array([3, 0, 2, 1], dtype='int64')
+    assert_array_almost_equal(fKD.dist, dist)
+    assert_array_equal(fKD.tags, tags)
+
+def test_cython_tree():
+    r"""This test makes sure that the cython kdtree is finding the correct
+    nearest neighbors.
+    """
+    # Four points.
+    pos = np.empty((4, 3), dtype='float64')
+    # Make four points by hand that, in particular, will allow us to test
+    # the periodicity of the kdtree.
+    points = np.array([0.01, 0.5, 0.98, 0.99])
+    pos[:, 0] = points
+    pos[:, 1] = points
+    pos[:, 2] = points
+    kdtree = cKDTree(pos, leafsize = 2)
+    qv = np.array([0.999]*3)
+    res = kdtree.query(qv, 4, period=[1.,1.,1])
+    # What the answers should be.
+    dist = np.array([2.43e-04, 3.63e-04, 1.083e-03, 7.47003e-01])
+    tags = np.array([3, 0, 2, 1], dtype='int64')
+    assert_array_almost_equal(res[0], dist)
+    assert_array_equal(res[1], tags)
+
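
As a sanity check on the expected values above (a sketch, not part of the
commit): both trees appear to report squared distances under periodic wrap
on the unit domain, which can be reproduced by hand:

    import numpy as np

    points = np.array([0.01, 0.5, 0.98, 0.99])
    qv = 0.999
    d = np.abs(points - qv)
    d = np.minimum(d, 1.0 - d)   # periodic separation per axis
    sq_dist = 3 * d ** 2         # same offset along x, y, and z
    print(np.sort(sq_dist))      # [2.43e-04 3.63e-04 1.083e-03 7.47003e-01]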


diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -29,6 +29,7 @@
     y_dict, \
     axis_names
 from .volume_rendering.api import off_axis_projection
+from yt.data_objects.image_array import ImageArray
 import _MPL
 import numpy as np
 import weakref
@@ -133,8 +134,9 @@
                              self.bounds, int(self.antialias),
                              self._period, int(self.periodic),
                              ).transpose()
-        self[item] = buff
-        return buff
+        ia = ImageArray(buff, info=self._get_info(item))
+        self[item] = ia
+        return ia 
 
     def __setitem__(self, item, val):
         self.data[item] = val
@@ -145,6 +147,28 @@
             if f not in exclude:
                 self[f]
 
+    def _get_info(self, item):
+        info = {}
+        info['data_source'] = self.data_source.__str__()  
+        info['axis'] = self.data_source.axis
+        info['field'] = str(item)
+        info['units'] = self.data_source.pf.field_info[item].get_units()
+        info['xlim'] = self.bounds[:2]
+        info['ylim'] = self.bounds[2:]
+        info['length_to_cm'] = self.data_source.pf['cm']
+        info['projected_units'] = \
+                self.data_source.pf.field_info[item].get_projected_units()
+        info['center'] = self.data_source.center
+        try:
+            info['coord'] = self.data_source.coord
+        except AttributeError:
+            pass
+        try:
+            info['weight_field'] = self.data_source.weight_field
+        except AttributeError:
+            pass
+        return info
+
     def convert_to_pixel(self, coords):
         r"""This function converts coordinates in code-space to pixel-space.
 

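With this change, indexing a FixedResolutionBuffer hands back an ImageArray
carrying the metadata assembled by _get_info() instead of a bare ndarray.  A
hypothetical usage sketch (it assumes ImageArray exposes the info dict as
.info, as the constructor call above suggests; 'pf' is an already-loaded
parameter file):

    from yt.visualization.fixed_resolution import FixedResolutionBuffer

    slc = pf.h.slice(2, 0.5)  # z-axis slice through the domain center
    frb = FixedResolutionBuffer(slc, (0.0, 1.0, 0.0, 1.0), (512, 512))
    im = frb["Density"]       # ImageArray, not a plain ndarray
    print(im.info["units"], im.info["xlim"], im.info["ylim"])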

diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -116,7 +116,7 @@
     image = image.transpose().copy() # Have to make sure it's contiguous 
     au.write_png(image, fn)
 
-def write_bitmap(bitmap_array, filename, max_val = None, transpose=True):
+def write_bitmap(bitmap_array, filename, max_val = None, transpose=False):
     r"""Write out a bitmapped image directly to a PNG file.
 
     This accepts a three- or four-channel `bitmap_array`.  If the image is not
@@ -379,7 +379,7 @@
                          take_log=True)
     """
     import matplotlib
-    from ._mpl_imports import *
+    from ._mpl_imports import FigureCanvasAgg, FigureCanvasPdf, FigureCanvasPS
 
     # If this is rendered as log, then apply now.
     if take_log:
@@ -420,21 +420,22 @@
     else:
         dpi = None
 
-    if filename[-4:] == '.png':
-        suffix = ''
-    else:
+    suffix = os.path.splitext(filename)[1]
+
+    if suffix == '':
         suffix = '.png'
         filename = "%s%s" % (filename, suffix)
-    mylog.info("Saving plot %s", fn)
+    mylog.info("Saving plot %s", filename)
     if suffix == ".png":
         canvas = FigureCanvasAgg(fig)
     elif suffix == ".pdf":
         canvas = FigureCanvasPdf(fig)
     elif suffix in (".eps", ".ps"):
-        canvas = FigureCanvasPS
+        canvas = FigureCanvasPS(fig)
     else:
         mylog.warning("Unknown suffix %s, defaulting to Agg", suffix)
         canvas = FigureCanvasAgg(fig)
+
     canvas.print_figure(filename)
     return filename
 
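The switch from filename[-4:] to os.path.splitext is what routes non-PNG
suffixes to the right canvas class.  The pattern in isolation (a sketch of
the logic above, not new API):

    import os

    def normalize_suffix(filename):
        # splitext handles any extension length, unlike filename[-4:].
        suffix = os.path.splitext(filename)[1]
        if suffix == '':
            suffix = '.png'
            filename = "%s%s" % (filename, suffix)
        return filename, suffix

    print(normalize_suffix("image"))      # ('image.png', '.png')
    print(normalize_suffix("image.pdf"))  # ('image.pdf', '.pdf')
    print(normalize_suffix("image.ps"))   # ('image.ps', '.ps')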


diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -7,6 +7,8 @@
 Affiliation: UC Berkeley
 Author: Stephen Skory <s at skory.us>
 Affiliation: UC San Diego
+Author: Anthony Scopatz <scopatz at gmail.com>
+Affiliation: The University of Chicago
 Homepage: http://yt-project.org/
 License:
   Copyright (C) 2008-2011 Matthew Turk, JS Oishi, Stephen Skory.  All Rights Reserved.
@@ -211,7 +213,7 @@
 class ContourCallback(PlotCallback):
     _type_name = "contour"
     def __init__(self, field, ncont=5, factor=4, clim=None,
-                 plot_args = None):
+                 plot_args = None, label = False, label_args = None):
         """
         annotate_contour(self, field, ncont=5, factor=4, take_log=False, clim=None,
                          plot_args = None):
@@ -230,6 +232,10 @@
         self.clim = clim
         if plot_args is None: plot_args = {'colors':'k'}
         self.plot_args = plot_args
+        self.label = label
+        if label_args is None:
+            label_args = {}
+        self.label_args = label_args
 
     def __call__(self, plot):
         x0, x1 = plot.xlim
@@ -288,12 +294,16 @@
             self.clim = (np.log10(self.clim[0]), np.log10(self.clim[1]))
         
         if self.clim is not None: 
-            self.ncont = np.linspace(self.clim[0], self.clim[1], ncont)
+            self.ncont = np.linspace(self.clim[0], self.clim[1], self.ncont)
         
-        plot._axes.contour(xi,yi,zi,self.ncont, **self.plot_args)
+        cset = plot._axes.contour(xi,yi,zi,self.ncont, **self.plot_args)
         plot._axes.set_xlim(xx0,xx1)
         plot._axes.set_ylim(yy0,yy1)
         plot._axes.hold(False)
+        
+        if self.label:
+            plot._axes.clabel(cset, **self.label_args)
+        
 
 class GridBoundaryCallback(PlotCallback):
     _type_name = "grids"
@@ -364,38 +374,23 @@
 
 class StreamlineCallback(PlotCallback):
     _type_name = "streamlines"
-    def __init__(self, field_x, field_y, factor=6.0, nx=16, ny=16,
-                 xstart=(0,1), ystart=(0,1), nsample=256,
-                 start_at_xedge=False, start_at_yedge=False,
-                 plot_args=None):
+    def __init__(self, field_x, field_y, factor = 16,
+                 density = 1, plot_args=None):
         """
-        annotate_streamlines(field_x, field_y, factor=6.0, nx=16, ny=16,
-                             xstart=(0,1), ystart=(0,1), nsample=256,
-                             start_at_xedge=False, start_at_yedge=False,
-                             plot_args=None):
+        annotate_streamlines(field_x, field_y, factor = 16,
+                             density = 1, plot_args=None):
 
         Add streamlines to any plot, using the *field_x* and *field_y*
-        from the associated data, using *nx* and *ny* starting points
-        that are bounded by *xstart* and *ystart*.  To begin
-        streamlines from the left edge of the plot, set
-        *start_at_xedge* to True; for the bottom edge, use
-        *start_at_yedge*.  A line with the qmean vector magnitude will
-        cover 1.0/*factor* of the image.
+        from the associated data, sampling every *factor* datapoints as in
+        'quiver'. *density* sets how densely packed the streamlines are.
         """
         PlotCallback.__init__(self)
         self.field_x = field_x
         self.field_y = field_y
-        self.xstart = xstart
-        self.ystart = ystart
-        self.nsample = nsample
+        self.bv_x = self.bv_y = 0
         self.factor = factor
-        if start_at_xedge:
-            self.data_size = (1,ny)
-        elif start_at_yedge:
-            self.data_size = (nx,1)
-        else:
-            self.data_size = (nx,ny)
-        if plot_args is None: plot_args = {'color':'k', 'linestyle':'-'}
+        self.dens = density
+        if plot_args is None: plot_args = {}
         self.plot_args = plot_args
         
     def __call__(self, plot):
@@ -404,43 +399,26 @@
         xx0, xx1 = plot._axes.get_xlim()
         yy0, yy1 = plot._axes.get_ylim()
         plot._axes.hold(True)
-        nx = plot.image._A.shape[0]
-        ny = plot.image._A.shape[1]
+        nx = plot.image._A.shape[0] / self.factor
+        ny = plot.image._A.shape[1] / self.factor
         pixX = _MPL.Pixelize(plot.data['px'],
                              plot.data['py'],
                              plot.data['pdx'],
                              plot.data['pdy'],
-                             plot.data[self.field_x],
+                             plot.data[self.field_x] - self.bv_x,
                              int(nx), int(ny),
-                           (x0, x1, y0, y1),)
+                           (x0, x1, y0, y1),).transpose()
         pixY = _MPL.Pixelize(plot.data['px'],
                              plot.data['py'],
                              plot.data['pdx'],
                              plot.data['pdy'],
-                             plot.data[self.field_y],
+                             plot.data[self.field_y] - self.bv_y,
                              int(nx), int(ny),
-                           (x0, x1, y0, y1),)
-        r0 = np.mgrid[self.xstart[0]*nx:self.xstart[1]*nx:self.data_size[0]*1j,
-                      self.ystart[0]*ny:self.ystart[1]*ny:self.data_size[1]*1j]
-        lines = np.zeros((self.nsample, 2, self.data_size[0], self.data_size[1]))
-        lines[0,:,:,:] = r0
-        mag = np.sqrt(pixX**2 + pixY**2)
-        scale = np.sqrt(nx*ny) / (self.factor * mag.mean())
-        dt = 1.0 / (self.nsample-1)
-        for i in range(1,self.nsample):
-            xt = lines[i-1,0,:,:]
-            yt = lines[i-1,1,:,:]
-            ix = np.maximum(np.minimum((xt).astype('int'), nx-1), 0)
-            iy = np.maximum(np.minimum((yt).astype('int'), ny-1), 0)
-            lines[i,0,:,:] = xt + dt * pixX[ix,iy] * scale
-            lines[i,1,:,:] = yt + dt * pixY[ix,iy] * scale
-        # scale into data units
-        lines[:,0,:,:] = lines[:,0,:,:] * (xx1 - xx0) / nx + xx0
-        lines[:,1,:,:] = lines[:,1,:,:] * (yy1 - yy0) / ny + yy0
-        for i in range(self.data_size[0]):
-            for j in range(self.data_size[1]):
-                plot._axes.plot(lines[:,0,i,j], lines[:,1,i,j],
-                                **self.plot_args)
+                           (x0, x1, y0, y1),).transpose()
+        X,Y = (np.linspace(xx0,xx1,nx,endpoint=True),
+                          np.linspace(yy0,yy1,ny,endpoint=True))
+        plot._axes.streamplot(X,Y, pixX, pixY, density = self.dens,
+                              **self.plot_args)
         plot._axes.set_xlim(xx0,xx1)
         plot._axes.set_ylim(yy0,yy1)
         plot._axes.hold(False)
@@ -461,30 +439,6 @@
         plot._axes.set_xlabel(self.label)
         plot._axes.set_ylabel(self.label)
 
-class TimeCallback(PlotCallback):
-    _type_name = "time"
-    def __init__(self, format_code='10.7e'):
-        """
-        This annotates the plot with the current simulation time.
-        For now, the time is displayed in seconds.
-        *format_code* can be optionally set, allowing a custom 
-        c-style format code for the time display.
-        """
-        self.format_code = format_code
-        PlotCallback.__init__(self)
-    
-    def __call__(self, plot):
-        current_time = plot.pf.current_time/plot.pf['Time']
-        timestring = format(current_time,self.format_code)
-        base = timestring[:timestring.find('e')]
-        exponent = timestring[timestring.find('e')+1:]
-        if exponent[0] == '+':
-            exponent = exponent[1:]
-        timestring = r'$t\/=\/'+base+''+r'\times\,10^{'+exponent+r'}\, \rm{s}$'
-        from mpl_toolkits.axes_grid1.anchored_artists import AnchoredText
-        at = AnchoredText(timestring, prop=dict(size=12), frameon=True, loc=4)
-        plot._axes.add_artist(at)
-
 def get_smallest_appropriate_unit(v, pf):
     max_nu = 1e30
     good_u = None
@@ -728,9 +682,13 @@
         self.plot_args = plot_args
 
     def __call__(self, plot):
+        if len(self.pos) == 3:
+            pos = (self.pos[x_dict[plot.data.axis]],
+                   self.pos[y_dict[plot.data.axis]])
+        else: pos = self.pos
         from matplotlib.patches import Arrow
         # Now convert the pixels to code information
-        x, y = self.convert_to_plot(plot, self.pos)
+        x, y = self.convert_to_plot(plot, pos)
         dx, dy = self.convert_to_plot(plot, self.code_size, False)
         arrow = Arrow(x, y, dx, dy, **self.plot_args)
         plot._axes.add_patch(arrow)
@@ -750,12 +708,13 @@
         self.text_args = text_args
 
     def __call__(self, plot):
-
-
+        if len(self.pos) == 3:
+            pos = (self.pos[x_dict[plot.data.axis]],
+                   self.pos[y_dict[plot.data.axis]])
+        else: pos = self.pos
         width,height = plot.image._A.shape
-        x,y = self.convert_to_plot(plot, self.pos)
-        x,y = x/width,y/height
-
+        x,y = self.convert_to_plot(plot, pos)
+        
         plot._axes.text(x, y, self.text, **self.text_args)
 
 class MarkerAnnotateCallback(PlotCallback):
@@ -773,13 +732,18 @@
         self.plot_args = plot_args
 
     def __call__(self, plot):
-        if len(self.pos) == 3:
+        xx0, xx1 = plot._axes.get_xlim()
+        yy0, yy1 = plot._axes.get_ylim()
+        if np.array(self.pos).shape == (3,):
             pos = (self.pos[x_dict[plot.data.axis]],
                    self.pos[y_dict[plot.data.axis]])
-        else: pos = self.pos
+        elif np.array(self.pos).shape == (2,):
+            pos = self.pos
         x,y = self.convert_to_plot(plot, pos)
         plot._axes.hold(True)
-        plot._axes.plot((x,),(y,),self.marker, **self.plot_args)
+        plot._axes.scatter(x,y, marker = self.marker, **self.plot_args)
+        plot._axes.set_xlim(xx0,xx1)
+        plot._axes.set_ylim(yy0,yy1)
         plot._axes.hold(False)
 
 class SphereCallback(PlotCallback):
@@ -1119,3 +1083,152 @@
     def __call__(self,plot):
         plot._axes.set_title(self.title)
 
+class FlashRayDataCallback(PlotCallback):
+    _type_name = "flash_ray_data"
+    def __init__(self, cmap_name='bone', sample=None):
+        """ 
+        annotate_flash_ray_data(cmap_name='bone', sample=None)
+
+        Adds ray-trace data to the plot.  *cmap_name* is the name of the color map 
+        ('bone', 'jet', 'hot', etc).  *sample* dictates the amount of downsampling 
+        to apply so that not every ray is plotted.  This may be None 
+        (plot all rays, default), an integer (step size), or a slice object.
+        """
+        self.cmap_name = cmap_name
+        self.sample = sample if isinstance(sample, slice) else slice(None, None, sample)
+
+    def __call__(self, plot):
+        ray_data = plot.data.pf._handle["RayData"][:]
+        idx = ray_data[:,0].argsort(kind="mergesort")
+        ray_data = ray_data[idx]
+
+        tags = ray_data[:,0]
+        coords = ray_data[:,1:3]
+        power = ray_data[:,4]
+        power /= power.max()
+        cx, cy = self.convert_to_plot(plot, coords.T)
+        coords[:,0], coords[:,1] = cx, cy
+        splitidx = np.argwhere(0 < (tags[1:] - tags[:-1])) + 1
+        coords = np.split(coords, splitidx.flat)[self.sample]
+        power = np.split(power, splitidx.flat)[self.sample]
+        cmap = matplotlib.cm.get_cmap(self.cmap_name)
+
+        plot._axes.hold(True)
+        colors = [cmap(p.max()) for p in power]
+        lc = matplotlib.collections.LineCollection(coords, colors=colors)
+        plot._axes.add_collection(lc)
+        plot._axes.hold(False)
+
+
+class TimestampCallback(PlotCallback):
+    _type_name = "timestamp"
+    _time_conv = {
+          'as': 1e-18,
+          'attosec': 1e-18,
+          'attosecond': 1e-18,
+          'attoseconds': 1e-18,
+          'fs': 1e-15,
+          'femtosec': 1e-15,
+          'femtosecond': 1e-15,
+          'femtoseconds': 1e-15,
+          'ps': 1e-12,
+          'picosec': 1e-12,
+          'picosecond': 1e-12,
+          'picoseconds': 1e-12,
+          'ns': 1e-9,
+          'nanosec': 1e-9,
+          'nanosecond':1e-9,
+          'nanoseconds' : 1e-9,
+          'us': 1e-6,
+          'microsec': 1e-6,
+          'microsecond': 1e-6,
+          'microseconds': 1e-6,
+          'ms': 1e-3,
+          'millisec': 1e-3,
+          'millisecond': 1e-3,
+          'milliseconds': 1e-3,
+          's': 1.0,
+          'sec': 1.0,
+          'second':1.0,
+          'seconds': 1.0,
+          'm': 60.0,
+          'min': 60.0,
+          'minute': 60.0,
+          'minutes': 60.0,
+          'h': 3600.0,
+          'hour': 3600.0,
+          'hours': 3600.0,
+          'd': 86400.0,
+          'day': 86400.0,
+          'days': 86400.0,
+          'y': 86400.0*365.25,
+          'year': 86400.0*365.25,
+          'years': 86400.0*365.25,
+          'ev': 1e-9 * 7.6e-8 / 6.03,
+          'kev': 1e-12 * 7.6e-8 / 6.03,
+          'mev': 1e-15 * 7.6e-8 / 6.03,
+          }
+
+    def __init__(self, x, y, units=None, format="{time:.3G} {units}", **kwargs):
+        """ 
+        annotate_timestamp(x, y, units=None, format="{time:.3G} {units}", **kwargs)
+
+        Adds the current time to the plot at the point given by *x* and *y*.  If 
+        *units* is given ('s', 'ms', 'ns', etc), it will convert the time to this 
+        basis.  If *units* is None, it will attempt to figure out the correct 
+        value by which to scale.  The *format* keyword is a template string that 
+        will be evaluated and displayed on the plot.  All other *kwargs* will be 
+        passed to the text() method on the plot axes.  See matplotlib's text() 
+        function for more information.
+        """
+        self.x = x
+        self.y = y
+        self.format = format
+        self.units = units
+        self.kwargs = {'color': 'w'}
+        self.kwargs.update(kwargs)
+
+    def __call__(self, plot):
+        if self.units is None:
+            t = plot.data.pf.current_time
+            scale_keys = ['as', 'fs', 'ps', 'ns', 'us', 'ms', 's']
+            self.units = 's'
+            for k in scale_keys:
+                if t < self._time_conv[k]:
+                    break
+                self.units = k
+        t = plot.data.pf.current_time / self._time_conv[self.units.lower()]
+        if self.units == 'us':
+            self.units = '$\\mu s$'
+        s = self.format.format(time=t, units=self.units)
+        plot._axes.hold(True)
+        plot._axes.text(self.x, self.y, s, **self.kwargs)
+        plot._axes.hold(False)
+
+
+class MaterialBoundaryCallback(ContourCallback):
+    _type_name = "material_boundary"
+    def __init__(self, field='targ', ncont=1, factor=4, clim=(0.9, 1.0), **kwargs):
+        """ 
+        annotate_material_boundary(self, field='targ', ncont=1, factor=4, 
+                                   clim=(0.9, 1.0), **kwargs):
+
+        Add the limiting contours of *field* to the plot.  Nominally, *field* is 
+        the target material but may be any other field present in the hierarchy.
+        The number of contours generated is given by *ncont*, *factor* governs 
+        the number of points used in the interpolation, and *clim* gives the 
+        (lower, upper) limits for contouring.  For this to truly be the boundary,
+        *clim* should be close to the edge.  For example, the default is (0.9, 1.0)
+        for 'targ', which is defined on the range [0.0, 1.0].  All other *kwargs* 
+        will be passed to the contour() method on the plot axes.  See matplotlib
+        for more information.
+        """
+        plot_args = {'colors': 'w'}
+        plot_args.update(kwargs)
+        super(MaterialBoundaryCallback, self).__init__(field=field, ncont=ncont,
+                                                       factor=factor, clim=clim,
+                                                       plot_args=plot_args)
+
+    def __call__(self, plot):
+        super(MaterialBoundaryCallback, self).__call__(plot)
+
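
A hypothetical usage sketch for the reworked and newly added callbacks
(argument values are illustrative; 'p' stands for any plot object that
exposes the annotate_* dispatch, e.g. a slice plot):

    p.annotate_streamlines("x-velocity", "y-velocity", factor=16, density=1)
    p.annotate_timestamp(0.05, 0.95, units="ns")
    p.annotate_contour("Density", ncont=5, label=True,
                       label_args={"fontsize": 8})
    p.save()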


diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -695,6 +695,15 @@
 
     """
     _current_field = None
+    _frb_generator = None
+    _plot_type = None
+
+    def __init__(self, *args, **kwargs):
+        if self._frb_generator == None:
+            self._frb_generator = kwargs.pop("frb_generator")
+        if self._plot_type == None:
+            self._plot_type = kwargs.pop("plot_type")
+        PWViewer.__init__(self, *args, **kwargs)
 
     def _setup_plots(self):
         if self._current_field is not None:
@@ -774,14 +783,13 @@
             except ParseFatalException, err:
                 raise YTCannotParseFieldDisplayName(f,field_name,str(err))
 
-            try:
-                parser.parse(r'$'+md['units']+r'$')
-            except ParseFatalException, err:
-                raise YTCannotParseUnitDisplayName(f, md['units'],str(err))
-
             if md['units'] == None or md['units'] == '':
                 label = field_name
             else:
+                try:
+                    parser.parse(r'$'+md['units']+r'$')
+                except ParseFatalException, err:
+                    raise YTCannotParseUnitDisplayName(f, md['units'],str(err))
                 label = field_name+r'$\/\/('+md['units']+r')$'
 
             self.plots[f].cb.set_label(label)
@@ -845,17 +853,22 @@
         >>> slc.save(mpl_kwargs={'bbox_inches':'tight'})
 
         """
+        names = []
+        if mpl_kwargs is None: mpl_kwargs = {}
         if name == None:
             name = str(self.pf)
-        elif name.endswith('.png'):
-            return v.save(name)
-        if mpl_kwargs is None: mpl_kwargs = {}
+        suffix = os.path.splitext(name)[1]
+        if suffix != '':
+            for k, v in self.plots.iteritems():
+                names.append(v.save(name,mpl_kwargs))
+            return names
         axis = axis_names[self.data_source.axis]
         weight = None
         type = self._plot_type
         if type in ['Projection','OffAxisProjection']:
             weight = self.data_source.weight_field
-        names = []
+        if 'Cutting' in self.data_source.__class__.__name__:
+            type = 'OffAxisSlice'
         for k, v in self.plots.iteritems():
             if axis:
                 n = "%s_%s_%s_%s" % (name, type, axis, k)
@@ -1220,6 +1233,7 @@
     _ext_widget_id = None
     _current_field = None
     _widget_name = "plot_window"
+    _frb_generator = FixedResolutionBuffer
 
     def _setup_plots(self):
         from yt.gui.reason.bottle_mods import PayloadHandler
@@ -1397,24 +1411,25 @@
             self.cax = self.figure.add_axes(caxrect)
             
     def save(self, name, mpl_kwargs, canvas = None):
-        if name[-4:] == '.png':
-            suffix = ''
+        suffix = os.path.splitext(name)[1]
+        
+        if suffix == '':
+            suffix = '.png'
+            name = "%s%s" % (name, suffix)
+        mylog.info("Saving plot %s", name)
+        if suffix == ".png":
+            canvas = FigureCanvasAgg(self.figure)
+        elif suffix == ".pdf":
+            canvas = FigureCanvasPdf(self.figure)
+        elif suffix in (".eps", ".ps"):
+            canvas = FigureCanvasPS(self.figure)
         else:
-            suffix = '.png'
-        fn = "%s%s" % (name, suffix)
-        mylog.info("Saving plot %s", fn)
-        if canvas is None:
-            if suffix == ".png":
-                canvas = FigureCanvasAgg(self.figure)
-            elif suffix == ".pdf":
-                canvas = FigureCanvasPdf(self.figure)
-            elif suffix in (".eps", ".ps"):
-                canvas = FigureCanvasPS
-            else:
-                mylog.warning("Unknown suffix %s, defaulting to Agg", suffix)
-                canvas = FigureCanvasAgg(self.figure)
-        canvas.print_figure(fn,**mpl_kwargs)
-        return fn
+            mylog.warning("Unknown suffix %s, defaulting to Agg", suffix)
+            canvas = FigureCanvasAgg(self.figure)
+
+
+        canvas.print_figure(name,**mpl_kwargs)
+        return name
 
     def _get_best_layout(self, size):
         aspect = 1.0*size[0]/size[1]


diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/visualization/streamlines.py
--- a/yt/visualization/streamlines.py
+++ b/yt/visualization/streamlines.py
@@ -118,7 +118,9 @@
         if length is None:
             length = np.max(self.pf.domain_right_edge-self.pf.domain_left_edge)
         self.length = length
-        self.steps = int(length/dx)
+        self.steps = int(length/dx)+1
+        # Fix up the dx.
+        self.dx = 1.0*self.length/self.steps
         self.streamlines = np.zeros((self.N,self.steps,3), dtype='float64')
         self.magnitudes = None
         if self.get_magnitude:
@@ -146,7 +148,8 @@
     @parallel_passthrough
     def _finalize_parallel(self,data):
         self.streamlines = self.comm.mpi_allreduce(self.streamlines, op='sum')
-        self.magnitudes = self.comm.mpi_allreduce(self.magnitudes, op='sum')
+        if self.get_magnitude:
+            self.magnitudes = self.comm.mpi_allreduce(self.magnitudes, op='sum')
         
     def _integrate_through_brick(self, node, stream, step,
                                  periodic=False, mag=None):
@@ -205,5 +208,6 @@
         >>> matplotlib.pylab.semilogy(stream['t'], stream['Density'], '-x')
         
         """
-        return AMRStreamlineBase(self.streamlines[streamline_id], pf=self.pf)
+        return self.pf.h.streamline(self.streamlines[streamline_id],
+                                    length = self.length)
         
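The step-count fix rounds up and then recomputes dx so that steps * dx
covers the requested length exactly; a quick check of the arithmetic:

    length, dx = 1.0, 0.3
    steps = int(length / dx) + 1   # 4, where the old code stopped at 3
    dx = 1.0 * length / steps      # 0.25
    assert steps * dx == length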


diff -r 173eb46cbdd3a77fca3ea6fd920b589b0ba8a23a -r e7456093b8db2c1366f5aa9f3f4091630977c63d yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -37,6 +37,7 @@
     arr_ang2pix_nest, arr_fisheye_vectors
 from yt.utilities.math_utils import get_rotation_matrix
 from yt.utilities.orientation import Orientation
+from yt.data_objects.api import ImageArray
 from yt.visualization.image_writer import write_bitmap, write_image
 from yt.data_objects.data_containers import data_object_registry
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
@@ -347,15 +348,21 @@
 
     def save_image(self, fn, clip_ratio, image):
         if self.comm.rank is 0 and fn is not None:
-            if clip_ratio is not None:
-                write_bitmap(image, fn, clip_ratio * image.std())
-            else:
-                write_bitmap(image, fn)
-
+            image.write_png(fn, clip_ratio=clip_ratio)
 
     def initialize_source(self):
         return self.volume.initialize_source()
 
+    def get_information(self):
+        info_dict = {'fields':self.fields,
+                     'type':self.__class__.__name__,
+                     'east_vector':self.orienter.unit_vectors[0],
+                     'north_vector':self.orienter.unit_vectors[1],
+                     'normal_vector':self.orienter.unit_vectors[2],
+                     'width':self.width,
+                     'dataset':self.pf.fullpath}
+        return info_dict
+
     def snapshot(self, fn = None, clip_ratio = None, double_check = False,
                  num_threads = 0):
         r"""Ray-cast the camera.
@@ -390,7 +397,9 @@
         args = self.get_sampler_args(image)
         sampler = self.get_sampler(args)
         self.initialize_source()
-        image = self._render(double_check, num_threads, image, sampler)
+        image = ImageArray(self._render(double_check, num_threads, 
+                                        image, sampler),
+                           info=self.get_information())
         self.save_image(fn, clip_ratio, image)
         return image
 
@@ -670,7 +679,7 @@
 class PerspectiveCamera(Camera):
     expand_factor = 1.0
     def __init__(self, *args, **kwargs):
-        expand_factor = kwargs.pop('expand_factor', 1.0)
+        self.expand_factor = kwargs.pop('expand_factor', 1.0)
         Camera.__init__(self, *args, **kwargs)
 
     def get_sampler_args(self, image):
@@ -709,6 +718,27 @@
                 self.transfer_function, self.sub_samples)
         return args
 
+    def _render(self, double_check, num_threads, image, sampler):
+        pbar = get_pbar("Ray casting", (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
+        total_cells = 0
+        if double_check:
+            for brick in self.volume.bricks:
+                for data in brick.my_data:
+                    if np.any(np.isnan(data)):
+                        raise RuntimeError
+
+        view_pos = self.front_center
+        for brick in self.volume.traverse(view_pos, self.front_center, image):
+            sampler(brick, num_threads=num_threads)
+            total_cells += np.prod(brick.my_data[0].shape)
+            pbar.update(total_cells)
+
+        pbar.finish()
+        image = sampler.aimage
+        self.finalize_image(image)
+        return image
+
+
     def finalize_image(self, image):
         image.shape = self.resolution[0], self.resolution[0], 3
 
@@ -801,6 +831,15 @@
 
         return image
 
+    def get_information(self):
+        info_dict = {'fields':self.fields,
+                     'type':self.__class__.__name__,
+                     'center':self.center,
+                     'radius':self.radius,
+                     'dataset':self.pf.fullpath}
+        return info_dict
+
+
     def snapshot(self, fn = None, clip_ratio = None, double_check = False,
                  num_threads = 0, clim = None, label = None):
         r"""Ray-cast the camera.
@@ -828,7 +867,9 @@
         args = self.get_sampler_args(image)
         sampler = self.get_sampler(args)
         self.volume.initialize_source()
-        image = self._render(double_check, num_threads, image, sampler)
+        image = ImageArray(self._render(double_check, num_threads, 
+                                        image, sampler),
+                           info=self.get_information())
         self.save_image(fn, clim, image, label = label)
         return image
 
@@ -1264,8 +1305,9 @@
 
         if self.image is not None:
             del self.image
+        image = ImageArray(image,
+                           info=self.get_information())
         self.image = image
-       
         return image
 
     def save_image(self, fn, clip_ratio=None):
@@ -1407,7 +1449,7 @@
             yield self.snapshot()
 
 def allsky_projection(pf, center, radius, nside, field, weight = None,
-                      inner_radius = 10, rotation = None):
+                      inner_radius = 10, rotation = None, source = None):
     r"""Project through a parameter file, through an allsky-method
     decomposition from HEALpix, and return the image plane.
 
@@ -1442,6 +1484,9 @@
         If supplied, the vectors will be rotated by this.  You can construct
         this by, for instance, calling np.array([v1,v2,v3]) where those are the
         three reference planes of an orthogonal frame (see ortho_find).
+    source : data container, default None
+        If supplied, this gives the data source from which the all-sky
+        projection pulls its data.
 
     Returns
     -------
@@ -1485,12 +1530,20 @@
     positions += inner_radius * dx * vs
     vs *= radius
     uv = np.ones(3, dtype='float64')
-    grids = pf.h.sphere(center, radius)._grids
+    if source is not None:
+        grids = source._grids
+    else:
+        grids = pf.h.sphere(center, radius)._grids
     sampler = ProjectionSampler(positions, vs, center, (0.0, 0.0, 0.0, 0.0),
                                 image, uv, uv, np.zeros(3, dtype='float64'))
     pb = get_pbar("Sampling ", len(grids))
     for i,grid in enumerate(grids):
-        data = [grid[field] * grid.child_mask.astype('float64')
+        if source is not None:
+            data = [grid[field] * source._get_cut_mask(grid) * \
+                grid.child_mask.astype('float64')
+                for field in fields]
+        else:
+            data = [grid[field] * grid.child_mask.astype('float64')
                 for field in fields]
         pg = PartitionedGrid(
             grid.id, data,
@@ -1670,7 +1723,9 @@
 
         self.initialize_source()
 
-        image = self._render(double_check, num_threads, image, sampler)
+        image = ImageArray(self._render(double_check, num_threads, 
+                                        image, sampler),
+                           info=self.get_information())
 
         self.save_image(fn, clip_ratio, image)
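
Snapshots now come back as ImageArray instances annotated with the camera's
get_information() dict.  A hypothetical sketch (it assumes the metadata is
exposed as .info, matching the constructor call above; 'cam' is a camera
built via pf.h.camera(...)):

    image = cam.snapshot("render.png", clip_ratio=8.0)
    print(image.info["type"])          # e.g. 'Camera'
    print(image.info["north_vector"])  # orientation travels with the image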
 



https://bitbucket.org/yt_analysis/yt-3.0/changeset/8fcc60123a02/
changeset:   8fcc60123a02
branch:      yt
user:        MatthewTurk
date:        2012-11-02 12:22:36
summary:     Merging some stray changes to the fluid operators
affected #:  2 files

diff -r e7456093b8db2c1366f5aa9f3f4091630977c63d -r 8fcc60123a028a4330f08fd3e77c84874c6bb2d2 yt/utilities/flagging_methods.py
--- a/yt/utilities/flagging_methods.py
+++ b/yt/utilities/flagging_methods.py
@@ -57,7 +57,7 @@
             self.sigs.append(flagged.sum(axis=d1).sum(axis=d2))
         self.flagged = flagged
 
-    def find_by_zero_signature(self, dim):
+    def find_by_zero_signature(self):
         ge = []
         for dim in range(3):
             sig = self.sigs[dim]
@@ -75,14 +75,14 @@
             ge.append(grid_ends[:ng,:])
         return ge
 
-    def find_by_second_derivative(flagged, dim):
+    def find_by_second_derivative(self):
         ze = []
         for dim in range(3):
             sig = self.sigs[dim]
             sd = sig[:-2] - 2.0*sig[1:-1] + sig[2:]
             grid_ends = np.zeros((sig.size, 2))
             ng = 0
-            center = int((flagged.shape[dim] - 1) / 2)
+            center = int((self.flagged.shape[dim] - 1) / 2)
             strength = zero_strength = 0
             for i in range(1, sig.size-1):
                 # Note that sd is offset by one
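
For intuition, the zero-signature pass turns each one-dimensional
flagged-cell signature into [start, end] index pairs of its nonzero runs; a
standalone sketch of that inner loop on a made-up signature:

    import numpy as np

    sig = np.array([0, 3, 5, 0, 0, 2, 1, 0])  # flagged counts along one axis
    grid_ends = np.zeros((sig.size, 2))
    ng = i = 0
    while i < sig.size:
        if sig[i] != 0:
            grid_ends[ng, 0] = i
            while i < sig.size and sig[i] != 0:
                i += 1
            grid_ends[ng, 1] = i - 1
            ng += 1
        i += 1
    print(grid_ends[:ng, :])  # [[1. 2.] [5. 6.]]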


diff -r e7456093b8db2c1366f5aa9f3f4091630977c63d -r 8fcc60123a028a4330f08fd3e77c84874c6bb2d2 yt/utilities/initial_conditions.py
--- a/yt/utilities/initial_conditions.py
+++ b/yt/utilities/initial_conditions.py
@@ -23,21 +23,37 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+import numpy as np
+
 class FluidOperator(object):
     def apply(self, pf):
         for g in pf.h.grids: self(g)
 
 class TopHatSphere(FluidOperator):
-    def __init__(self, radius, center, value, field = "Density"):
+    def __init__(self, radius, center, fields):
         self.radius = radius
         self.center = center
-        self.value = value
-        self.field = field
+        self.fields = fields
         
-    def __call__(self, grid):
+    def __call__(self, grid, sub_select = None):
         r = np.zeros(grid.ActiveDimensions, dtype="float64")
         for i, ax in enumerate("xyz"):
             np.add(r, (grid[ax] - self.center[i])**2.0, r)
         np.sqrt(r, r)
         ind = (r <= self.radius)
-        grid[self.field][ind] += self.value
+        if sub_select is not None:
+            ind &= sub_select
+        for field, val in self.fields.iteritems():
+            grid[field][r < self.radius] = val
+
+class RandomFluctuation(DataModifier):
+    def __init__(self, fields):
+        self.fields = fields
+
+    def __call__(self, grid, sub_select = None):
+        if sub_select is None:
+            sub_select = Ellipsis
+        for field, mag in self.fields.iteritems():
+            vals = grid[field][sub_select]
+            rc = 1.0 + (np.random.random(vals.shape) - 0.5) * mag
+            grid[field][sub_select] *= rc
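
With the new interface a perturbation is described by a dict mapping field
names to values; a usage sketch under the signatures above ('pf' is an
already-loaded parameter file, and RandomFluctuation is used as corrected in
the following changeset):

    from yt.utilities.initial_conditions import TopHatSphere, RandomFluctuation

    sphere = TopHatSphere(0.1, [0.5, 0.5, 0.5],
                          {"Density": 10.0, "Temperature": 1e4})
    noise = RandomFluctuation({"Density": 0.1})  # +/- 5% multiplicative noise
    sphere.apply(pf)
    noise.apply(pf)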



https://bitbucket.org/yt_analysis/yt-3.0/changeset/10084fb57bba/
changeset:   10084fb57bba
branch:      yt
user:        MatthewTurk
date:        2012-11-02 12:23:31
summary:     Minor fix to the base class of RandomFluctuation
affected #:  1 file

diff -r 8fcc60123a028a4330f08fd3e77c84874c6bb2d2 -r 10084fb57bba1ffcf0191ed9c01561f9240a2f25 yt/utilities/initial_conditions.py
--- a/yt/utilities/initial_conditions.py
+++ b/yt/utilities/initial_conditions.py
@@ -46,7 +46,7 @@
         for field, val in self.fields.iteritems():
             grid[field][r < self.radius] = val
 
-class RandomFluctuation(DataModifier):
+class RandomFluctuation(FluidOperator):
     def __init__(self, fields):
         self.fields = fields
 



https://bitbucket.org/yt_analysis/yt-3.0/changeset/eea3cbc85877/
changeset:   eea3cbc85877
branch:      yt
user:        MatthewTurk
date:        2012-11-02 18:02:23
summary:     Preliminary support for refining grids.

 * Updated hierarchy print_stats to print cells^3
 * Added grow_flagging_field Cython function
 * Added FlaggingGrid, which is a wrapper for finding the flagging field of a
   grid
 * Added ProtoSubgrid, which can identify splits, shrink itself, etc
 * Added CoredSphere FluidOperator
affected #:  4 files

diff -r 10084fb57bba1ffcf0191ed9c01561f9240a2f25 -r eea3cbc8587746f7e10e07e15f55819355795f5b yt/data_objects/hierarchy.py
--- a/yt/data_objects/hierarchy.py
+++ b/yt/data_objects/hierarchy.py
@@ -382,17 +382,19 @@
         """
         Prints out (stdout) relevant information about the simulation
         """
-        header = "%3s\t%6s\t%14s" % ("level","# grids", "# cells")
+        header = "%3s\t%6s\t%14s\t%14s" % ("level","# grids", "# cells",
+                                           "# cells^3")
         print header
         print "%s" % (len(header.expandtabs())*"-")
         for level in xrange(MAXLEVEL):
             if (self.level_stats['numgrids'][level]) == 0:
                 break
-            print "% 3i\t% 6i\t% 14i" % \
+            print "% 3i\t% 6i\t% 14i\t% 14i" % \
                   (level, self.level_stats['numgrids'][level],
-                   self.level_stats['numcells'][level])
+                   self.level_stats['numcells'][level],
+                   self.level_stats['numcells'][level]**(1./3))
             dx = self.select_grids(level)[0].dds[0]
-        print "-" * 28
+        print "-" * 46
         print "   \t% 6i\t% 14i" % (self.level_stats['numgrids'].sum(), self.level_stats['numcells'].sum())
         print "\n"
         try:


diff -r 10084fb57bba1ffcf0191ed9c01561f9240a2f25 -r eea3cbc8587746f7e10e07e15f55819355795f5b yt/utilities/flagging_methods.py
--- a/yt/utilities/flagging_methods.py
+++ b/yt/utilities/flagging_methods.py
@@ -24,6 +24,7 @@
 """
 
 import numpy as np # For modern purposes
+from yt.utilities.lib import grow_flagging_field
 
 flagging_method_registry = {}
 
@@ -47,36 +48,109 @@
 class FlaggingGrid(object):
     def __init__(self, grid, methods):
         self.grid = grid
-        self.sigs = []
-        flagged = np.zeros(self.grid.ActiveDimensions, dtype="bool")
+        flagged = np.zeros(grid.ActiveDimensions, dtype="bool")
         for method in methods:
             flagged |= method(self.grid)
+        self.flagged = grow_flagging_field(flagged)
+        self.subgrids = []
+        self.left_index = grid.get_global_startindex()
+        self.dimensions = grid.ActiveDimensions.copy()
+
+    def find_subgrids(self):
+        if not np.any(self.flagged): return []
+        psg = ProtoSubgrid(self.flagged, self.left_index, self.dimensions)
+        sgl = [psg]
+        index = 0
+        while index < len(sgl):
+            psg = sgl[index]
+            psg.shrink()
+            if psg.dimensions.prod() == 0:
+                sgl[index] = None
+                continue
+            while not psg.acceptable:
+                new_psgs = []
+                for i, dim in enumerate(np.argsort(psg.dimensions)[::-1]):
+                    new_psgs = psg.find_by_zero_signature(dim)
+                    if len(new_psgs) > 1:
+                        break
+                if len(new_psgs) <= 1:
+                    new_psgs = psg.find_by_second_derivative()
+                psg = new_psgs[0]
+                sgl[index] = psg 
+                sgl.extend(new_psgs[1:])
+                psg.shrink()
+            index += 1
+        return sgl
+
+
+# Much or most of this is directly translated from Enzo
+class ProtoSubgrid(object):
+
+    def __init__(self, flagged_base, left_index, dimensions, offset = (0,0,0)):
+        self.left_index = left_index.copy()
+        self.dimensions = dimensions.copy()
+        self.flagged = flagged_base[offset[0]:offset[0]+dimensions[0],
+                                    offset[1]:offset[1]+dimensions[1],
+                                    offset[2]:offset[2]+dimensions[2]]
+        self.compute_signatures()
+
+    def compute_signatures(self):
+        self.sigs = []
         for dim in range(3):
             d1 = (dim + 1) % 3
             d2 = (dim == 0)
-            self.sigs.append(flagged.sum(axis=d1).sum(axis=d2))
-        self.flagged = flagged
+            self.sigs.append(self.flagged.sum(axis=d1).sum(axis=d2))
 
-    def find_by_zero_signature(self):
-        ge = []
+    @property
+    def acceptable(self):
+        return float(self.flagged.sum()) / self.flagged.size > 0.2
+
+    def shrink(self):
+        new_ind = []
         for dim in range(3):
             sig = self.sigs[dim]
-            grid_ends = np.zeros((sig.size, 2))
-            ng = 0
-            i = 0
-            while i < sig.size:
-                if sig[i] != 0:
-                    grid_ends[ng, 0] = i
-                    while i < sig.size and sig[i] != 0:
-                        i += 1
-                    grid_ends[ng, 1] = i - 1
-                    ng += 1
-                i += 1
-            ge.append(grid_ends[:ng,:])
-        return ge
+            new_start = 0
+            while sig[new_start] == 0:
+                new_start += 1
+            new_end = sig.size 
+            while sig[new_end - 1] == 0:
+                new_end -= 1
+            self.dimensions[dim] = new_end - new_start
+            self.left_index[dim] += new_start
+            new_ind.append((new_start, new_end))
+        self.flagged = self.flagged[new_ind[0][0]:new_ind[0][1],
+                                    new_ind[1][0]:new_ind[1][1],
+                                    new_ind[2][0]:new_ind[2][1]]
+        self.compute_signatures()
+
+    def find_by_zero_signature(self, dim):
+        sig = self.sigs[dim]
+        grid_ends = np.zeros((sig.size, 2))
+        ng = 0
+        i = 0
+        while i < sig.size:
+            if sig[i] != 0:
+                grid_ends[ng, 0] = i
+                while i < sig.size and sig[i] != 0:
+                    i += 1
+                grid_ends[ng, 1] = i - 1
+                ng += 1
+            i += 1
+        new_grids = []
+        for si, ei in grid_ends[:ng,:]:
+            li = self.left_index.copy()
+            dims = self.dimensions.copy()
+            li[dim] += si
+            dims[dim] = ei - si
+            offset = [0,0,0]
+            offset[dim] = si
+            new_grids.append(ProtoSubgrid(self.flagged, li, dims, offset))
+        return new_grids
 
     def find_by_second_derivative(self):
-        ze = []
+        max_strength = 0
+        max_axis = -1
+        max_ind = -1
         for dim in range(3):
             sig = self.sigs[dim]
             sd = sig[:-2] - 2.0*sig[1:-1] + sig[2:]
@@ -84,7 +158,7 @@
             ng = 0
             center = int((self.flagged.shape[dim] - 1) / 2)
             strength = zero_strength = 0
-            for i in range(1, sig.size-1):
+            for i in range(1, sig.size-2):
                 # Note that sd is offset by one
                 if sd[i-1] * sd[i] < 0:
                     strength = np.abs(sd[i-1] - sd[i])
@@ -92,5 +166,19 @@
+                       (strength == zero_strength and np.abs(center - i) < np.abs(zero_cross - i)):
                         zero_strength = strength
                         zero_cross = i
-            ze.append(zero_cross)
-        return ze
+            if zero_strength > max_strength:
+                max_strength, max_axis = zero_strength, dim
+                max_ind = zero_cross
+        dims = self.dimensions.copy()
+        li = self.left_index.copy()
+        dims[max_axis] = max_ind
+        psg1 = ProtoSubgrid(self.flagged, li, dims)
+        li[max_axis] += max_ind
+        dims[max_axis] = self.dimensions[max_axis] - max_ind
+        offset = np.zeros(3)
+        offset[max_axis] = max_ind
+        psg2 = ProtoSubgrid(self.flagged, li, dims, offset)
+        return [psg1, psg2]
+
+    def __str__(self):
+        return "LI: (%s) DIMS: (%s)" % (self.left_index, self.dimensions)


diff -r 10084fb57bba1ffcf0191ed9c01561f9240a2f25 -r eea3cbc8587746f7e10e07e15f55819355795f5b yt/utilities/initial_conditions.py
--- a/yt/utilities/initial_conditions.py
+++ b/yt/utilities/initial_conditions.py
@@ -44,7 +44,28 @@
         if sub_select is not None:
             ind &= sub_select
         for field, val in self.fields.iteritems():
-            grid[field][r < self.radius] = val
+            grid[field][ind] = val
+
+class CoredSphere(FluidOperator):
+    def __init__(self, core_radius, radius, center, fields):
+        self.radius = radius
+        self.center = center
+        self.fields = fields
+        self.core_radius = core_radius
+
+    def __call__(self, grid, sub_select = None):
+        r = np.zeros(grid.ActiveDimensions, dtype="float64")
+        r2 = self.radius**2
+        cr2 = self.core_radius**2
+        for i, ax in enumerate("xyz"):
+            np.add(r, (grid[ax] - self.center[i])**2.0, r)
+        np.maximum(r, cr2, r)
+        ind = (r <= r2)
+        if sub_select is not None:
+            ind &= sub_select
+        for field, (outer_val, inner_val) in self.fields.iteritems():
+            val = ((r[ind] - cr2) / (r2 - cr2))**0.5 * (outer_val - inner_val)
+            grid[field][ind] = val + inner_val
 
 class RandomFluctuation(FluidOperator):
     def __init__(self, fields):


diff -r 10084fb57bba1ffcf0191ed9c01561f9240a2f25 -r eea3cbc8587746f7e10e07e15f55819355795f5b yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -291,3 +291,25 @@
     # Return out unique values
     return best_dim, split, less_ids.view("bool"), greater_ids.view("bool")
 
+
+def grow_flagging_field(oofield):
+    cdef np.ndarray[np.uint8_t, ndim=3] ofield = oofield.astype("uint8")
+    cdef np.ndarray[np.uint8_t, ndim=3] nfield
+    nfield = np.zeros_like(ofield)
+    cdef int i, j, k, ni, nj, nk
+    cdef int oi, oj, ok
+    for ni in range(ofield.shape[0]):
+        for nj in range(ofield.shape[1]):
+            for nk in range(ofield.shape[2]):
+                for oi in range(3):
+                    i = ni + (oi - 1)
+                    if i < 0 or i >= ofield.shape[0]: continue
+                    for oj in range(3):
+                        j = nj + (oj - 1)
+                        if j < 0 or j >= ofield.shape[1]: continue
+                        for ok in range(3):
+                            k = nk + (ok - 1)
+                            if k < 0 or k >= ofield.shape[2]: continue
+                            if ofield[i, j, k] == 1:
+                                nfield[ni, nj, nk] = 1
+    return nfield.astype("bool")
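
A quick way to see what grow_flagging_field does (a self-contained sketch; the array shape is arbitrary):

    import numpy as np
    from yt.utilities.lib import grow_flagging_field

    flagged = np.zeros((8, 8, 8), dtype="bool")
    flagged[4, 4, 4] = True
    grown = grow_flagging_field(flagged)
    # A single flagged zone grows into its full 3x3x3 neighborhood.
    assert grown.sum() == 27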



https://bitbucket.org/yt_analysis/yt-3.0/changeset/4d96a8eab1f0/
changeset:   4d96a8eab1f0
branch:      yt
user:        MatthewTurk
date:        2012-11-02 18:13:27
summary:     Adding refine_amr routine to Stream frontend, for taking a parameter file and
recursively applying refinement criteria and fluid operations
affected #:  2 files

diff -r eea3cbc8587746f7e10e07e15f55819355795f5b -r 4d96a8eab1f0c2daf7b0381fbba90452f85d7351 yt/frontends/stream/api.py
--- a/yt/frontends/stream/api.py
+++ b/yt/frontends/stream/api.py
@@ -30,7 +30,8 @@
       StreamStaticOutput, \
       StreamHandler, \
       load_uniform_grid, \
-      load_amr_grids
+      load_amr_grids, \
+      refine_amr
 
 from .fields import \
       KnownStreamFields, \


diff -r eea3cbc8587746f7e10e07e15f55819355795f5b -r 4d96a8eab1f0c2daf7b0381fbba90452f85d7351 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -44,6 +44,8 @@
     decompose_array, get_psize
 from yt.utilities.definitions import \
     mpc_conversion, sec_conversion
+from yt.utilities.flagging_methods import \
+    FlaggingGrid
 
 from .fields import \
     StreamFieldInfo, \
@@ -495,3 +497,68 @@
     for unit in mpc_conversion.keys():
         spf.units[unit] = mpc_conversion[unit] * box_in_mpc
     return spf
+
+def refine_amr(base_pf, refinement_criteria, fluid_operators, max_level,
+               callback = None):
+    r"""Given a base parameter file, repeatedly apply refinement criteria and
+    fluid operators until a maximum level is reached.
+
+    Parameters
+    ----------
+    base_pf : StaticOutput
+        This is any static output.  It can also be a stream static output, for
+        instance as returned by load_uniform_grid.
+    refinement_criteria : list of :class:`~yt.utilities.flagging_methods.FlaggingMethod`
+        These criteria will be applied in sequence to identify cells that need
+        to be refined.
+    fluid_operators : list of :class:`~yt.utilities.initial_conditions.FluidOperator`
+        These fluid operators will be applied in sequence to all resulting
+        grids.
+    max_level : int
+        The maximum level to which the data will be refined
+    callback : function, optional
+        A function that will be called at the beginning of each refinement
+        cycle, with the current parameter file.
+
+    Examples
+    --------
+    >>> domain_dims = (32, 32, 32)
+    >>> data = np.zeros(domain_dims) + 0.25
+    >>> fo = [ic.CoredSphere(0.05, 0.3, [0.7,0.4,0.75], {"Density": (0.25, 100.0)})]
+    >>> rc = [fm.flagging_method_registry["overdensity"](8.0)]
+    >>> ug = load_uniform_grid({'Density': data}, domain_dims, 1.0)
+    >>> pf = refine_amr(ug, rc, fo, 5)
+    """
+    last_gc = base_pf.h.num_grids
+    cur_gc = -1
+    pf = base_pf    
+    while pf.h.max_level < max_level and last_gc != cur_gc:
+        mylog.info("Refining another level.  Current max level: %s",
+                  pf.h.max_level)
+        last_gc = pf.h.grids.size
+        for m in fluid_operators: m.apply(pf)
+        if callback is not None: callback(pf)
+        grid_data = []
+        for g in pf.h.grids:
+            gd = dict( left_edge = g.LeftEdge,
+                       right_edge = g.RightEdge,
+                       level = g.Level,
+                       dimensions = g.ActiveDimensions )
+            for field in pf.h.field_list:
+                gd[field] = g[field]
+            grid_data.append(gd)
+            if g.Level < pf.h.max_level: continue
+            fg = FlaggingGrid(g, refinement_criteria)
+            nsg = fg.find_subgrids()
+            for sg in nsg:
+                LE = sg.left_index * g.dds
+                dims = sg.dimensions * pf.refine_by
+                grid = pf.h.smoothed_covering_grid(g.Level + 1, LE, dims)
+                gd = dict(left_edge = LE, right_edge = grid.right_edge,
+                          level = g.Level + 1, dimensions = dims)
+                for field in pf.h.field_list:
+                    gd[field] = grid[field]
+                grid_data.append(gd)
+        pf = load_amr_grids(grid_data, pf.domain_dimensions, 1.0)
+        cur_gc = pf.h.num_grids
+    return pf
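
The doctest above leaves the `ic` and `fm` aliases implicit; spelled out in full, the same example reads (still a sketch, requiring only yt itself):

    import numpy as np
    from yt.frontends.stream.api import load_uniform_grid, refine_amr
    import yt.utilities.initial_conditions as ic
    import yt.utilities.flagging_methods as fm

    domain_dims = (32, 32, 32)
    data = np.zeros(domain_dims) + 0.25
    fo = [ic.CoredSphere(0.05, 0.3, [0.7, 0.4, 0.75],
                         {"Density": (0.25, 100.0)})]
    rc = [fm.flagging_method_registry["overdensity"](8.0)]
    ug = load_uniform_grid({'Density': data}, domain_dims, 1.0)
    pf = refine_amr(ug, rc, fo, 5)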



https://bitbucket.org/yt_analysis/yt-3.0/changeset/bc33e01c0c6e/
changeset:   bc33e01c0c6e
branch:      yt
user:        brittonsmith
date:        2012-11-08 20:38:26
summary:     Merged in MatthewTurk/yt (pull request #324)
affected #:  6 files

diff -r b9d9aca343f57f3708754a0043c0a4f4f3b72b35 -r bc33e01c0c6e636e5a20b1dc64c53c246e750e42 yt/data_objects/hierarchy.py
--- a/yt/data_objects/hierarchy.py
+++ b/yt/data_objects/hierarchy.py
@@ -382,17 +382,19 @@
         """
         Prints out (stdout) relevant information about the simulation
         """
-        header = "%3s\t%6s\t%14s" % ("level","# grids", "# cells")
+        header = "%3s\t%6s\t%14s\t%14s" % ("level","# grids", "# cells",
+                                           "# cells^3")
         print header
         print "%s" % (len(header.expandtabs())*"-")
         for level in xrange(MAXLEVEL):
             if (self.level_stats['numgrids'][level]) == 0:
                 break
-            print "% 3i\t% 6i\t% 14i" % \
+            print "% 3i\t% 6i\t% 14i\t% 14i" % \
                   (level, self.level_stats['numgrids'][level],
-                   self.level_stats['numcells'][level])
+                   self.level_stats['numcells'][level],
+                   self.level_stats['numcells'][level]**(1./3))
             dx = self.select_grids(level)[0].dds[0]
-        print "-" * 28
+        print "-" * 46
         print "   \t% 6i\t% 14i" % (self.level_stats['numgrids'].sum(), self.level_stats['numcells'].sum())
         print "\n"
         try:


diff -r b9d9aca343f57f3708754a0043c0a4f4f3b72b35 -r bc33e01c0c6e636e5a20b1dc64c53c246e750e42 yt/frontends/stream/api.py
--- a/yt/frontends/stream/api.py
+++ b/yt/frontends/stream/api.py
@@ -30,7 +30,8 @@
       StreamStaticOutput, \
       StreamHandler, \
       load_uniform_grid, \
-      load_amr_grids
+      load_amr_grids, \
+      refine_amr
 
 from .fields import \
       KnownStreamFields, \


diff -r b9d9aca343f57f3708754a0043c0a4f4f3b72b35 -r bc33e01c0c6e636e5a20b1dc64c53c246e750e42 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -44,6 +44,8 @@
     decompose_array, get_psize
 from yt.utilities.definitions import \
     mpc_conversion, sec_conversion
+from yt.utilities.flagging_methods import \
+    FlaggingGrid
 
 from .fields import \
     StreamFieldInfo, \
@@ -495,3 +497,68 @@
     for unit in mpc_conversion.keys():
         spf.units[unit] = mpc_conversion[unit] * box_in_mpc
     return spf
+
+def refine_amr(base_pf, refinement_criteria, fluid_operators, max_level,
+               callback = None):
+    r"""Given a base parameter file, repeatedly apply refinement criteria and
+    fluid operators until a maximum level is reached.
+
+    Parameters
+    ----------
+    base_pf : StaticOutput
+        This is any static output.  It can also be a stream static output, for
+        instance as returned by load_uniform_grid.
+    refinement_criteria : list of :class:`~yt.utilities.flagging_methods.FlaggingMethod`
+        These criteria will be applied in sequence to identify cells that need
+        to be refined.
+    fluid_operators : list of :class:`~yt.utilities.initial_conditions.FluidOperator`
+        These fluid operators will be applied in sequence to all resulting
+        grids.
+    max_level : int
+        The maximum level to which the data will be refined
+    callback : function, optional
+        A function that will be called at the beginning of each refinement
+        cycle, with the current parameter file.
+
+    Examples
+    --------
+    >>> domain_dims = (32, 32, 32)
+    >>> data = np.zeros(domain_dims) + 0.25
+    >>> fo = [ic.CoredSphere(0.05, 0.3, [0.7,0.4,0.75], {"Density": (0.25, 100.0)})]
+    >>> rc = [fm.flagging_method_registry["overdensity"](8.0)]
+    >>> ug = load_uniform_grid({'Density': data}, domain_dims, 1.0)
+    >>> pf = refine_amr(ug, rc, fo, 5)
+    """
+    last_gc = base_pf.h.num_grids
+    cur_gc = -1
+    pf = base_pf    
+    while pf.h.max_level < max_level and last_gc != cur_gc:
+        mylog.info("Refining another level.  Current max level: %s",
+                  pf.h.max_level)
+        last_gc = pf.h.grids.size
+        for m in fluid_operators: m.apply(pf)
+        if callback is not None: callback(pf)
+        grid_data = []
+        for g in pf.h.grids:
+            gd = dict( left_edge = g.LeftEdge,
+                       right_edge = g.RightEdge,
+                       level = g.Level,
+                       dimensions = g.ActiveDimensions )
+            for field in pf.h.field_list:
+                gd[field] = g[field]
+            grid_data.append(gd)
+            if g.Level < pf.h.max_level: continue
+            fg = FlaggingGrid(g, refinement_criteria)
+            nsg = fg.find_subgrids()
+            for sg in nsg:
+                LE = sg.left_index * g.dds
+                dims = sg.dimensions * pf.refine_by
+                grid = pf.h.smoothed_covering_grid(g.Level + 1, LE, dims)
+                gd = dict(left_edge = LE, right_edge = grid.right_edge,
+                          level = g.Level + 1, dimensions = dims)
+                for field in pf.h.field_list:
+                    gd[field] = grid[field]
+                grid_data.append(gd)
+        pf = load_amr_grids(grid_data, pf.domain_dimensions, 1.0)
+        cur_gc = pf.h.num_grids
+    return pf


diff -r b9d9aca343f57f3708754a0043c0a4f4f3b72b35 -r bc33e01c0c6e636e5a20b1dc64c53c246e750e42 yt/utilities/flagging_methods.py
--- a/yt/utilities/flagging_methods.py
+++ b/yt/utilities/flagging_methods.py
@@ -24,15 +24,10 @@
 """
 
 import numpy as np # For modern purposes
+from yt.utilities.lib import grow_flagging_field
 
 flagging_method_registry = {}
 
-def flag_cells(grid, methods):
-    flagged = np.zeros(grid.ActiveDimensions, dtype="bool")
-    for method in methods:
-        flagged |= method(grid)
-    return flagged
-
 class FlaggingMethod(object):
     _skip_add = False
     class __metaclass__(type):
@@ -46,6 +41,144 @@
     def __init__(self, over_density):
         self.over_density = over_density
 
-    def __call__(self, pf, grid):
-        rho = grid["Density"] / (pf.refine_by**grid.Level)
+    def __call__(self, grid):
+        rho = grid["Density"] / (grid.pf.refine_by**grid.Level)
         return (rho > self.over_density)
+
+class FlaggingGrid(object):
+    def __init__(self, grid, methods):
+        self.grid = grid
+        flagged = np.zeros(grid.ActiveDimensions, dtype="bool")
+        for method in methods:
+            flagged |= method(self.grid)
+        self.flagged = grow_flagging_field(flagged)
+        self.subgrids = []
+        self.left_index = grid.get_global_startindex()
+        self.dimensions = grid.ActiveDimensions.copy()
+
+    def find_subgrids(self):
+        if not np.any(self.flagged): return []
+        psg = ProtoSubgrid(self.flagged, self.left_index, self.dimensions)
+        sgl = [psg]
+        index = 0
+        while index < len(sgl):
+            psg = sgl[index]
+            psg.shrink()
+            if psg.dimensions.prod() == 0:
+                sgl[index] = None
+                continue
+            while not psg.acceptable:
+                new_psgs = []
+                for i, dim in enumerate(np.argsort(psg.dimensions)[::-1]):
+                    new_psgs = psg.find_by_zero_signature(dim)
+                    if len(new_psgs) > 1:
+                        break
+                if len(new_psgs) <= 1:
+                    new_psgs = psg.find_by_second_derivative()
+                psg = new_psgs[0]
+                sgl[index] = psg 
+                sgl.extend(new_psgs[1:])
+                psg.shrink()
+            index += 1
+        return sgl
+
+
+# Much or most of this is directly translated from Enzo
+class ProtoSubgrid(object):
+
+    def __init__(self, flagged_base, left_index, dimensions, offset = (0,0,0)):
+        self.left_index = left_index.copy()
+        self.dimensions = dimensions.copy()
+        self.flagged = flagged_base[offset[0]:offset[0]+dimensions[0],
+                                    offset[1]:offset[1]+dimensions[1],
+                                    offset[2]:offset[2]+dimensions[2]]
+        self.compute_signatures()
+
+    def compute_signatures(self):
+        self.sigs = []
+        for dim in range(3):
+            d1 = (dim + 1) % 3
+            d2 = (dim == 0)
+            self.sigs.append(self.flagged.sum(axis=d1).sum(axis=d2))
+
+    @property
+    def acceptable(self):
+        return float(self.flagged.sum()) / self.flagged.size > 0.2
+
+    def shrink(self):
+        new_ind = []
+        for dim in range(3):
+            sig = self.sigs[dim]
+            new_start = 0
+            while sig[new_start] == 0:
+                new_start += 1
+            new_end = sig.size 
+            while sig[new_end - 1] == 0:
+                new_end -= 1
+            self.dimensions[dim] = new_end - new_start
+            self.left_index[dim] += new_start
+            new_ind.append((new_start, new_end))
+        self.flagged = self.flagged[new_ind[0][0]:new_ind[0][1],
+                                    new_ind[1][0]:new_ind[1][1],
+                                    new_ind[2][0]:new_ind[2][1]]
+        self.compute_signatures()
+
+    def find_by_zero_signature(self, dim):
+        sig = self.sigs[dim]
+        grid_ends = np.zeros((sig.size, 2))
+        ng = 0
+        i = 0
+        while i < sig.size:
+            if sig[i] != 0:
+                grid_ends[ng, 0] = i
+                while i < sig.size and sig[i] != 0:
+                    i += 1
+                grid_ends[ng, 1] = i - 1
+                ng += 1
+            i += 1
+        new_grids = []
+        for si, ei in grid_ends[:ng,:]:
+            li = self.left_index.copy()
+            dims = self.dimensions.copy()
+            li[dim] += si
+            dims[dim] = ei - si
+            offset = [0,0,0]
+            offset[dim] = si
+            new_grids.append(ProtoSubgrid(self.flagged, li, dims, offset))
+        return new_grids
+
+    def find_by_second_derivative(self):
+        max_strength = 0
+        max_axis = -1
+        max_ind = -1
+        for dim in range(3):
+            sig = self.sigs[dim]
+            sd = sig[:-2] - 2.0*sig[1:-1] + sig[2:]
+            grid_ends = np.zeros((sig.size, 2))
+            ng = 0
+            center = int((self.flagged.shape[dim] - 1) / 2)
+            strength = zero_strength = 0
+            for i in range(1, sig.size-2):
+                # Note that sd is offset by one
+                if sd[i-1] * sd[i] < 0:
+                    strength = np.abs(sd[i-1] - sd[i])
+                    if strength > zero_strength or \
+                       (strength == zero_strength and np.abs(center - i) < np.abs(zero_cross - i)):
+                        zero_strength = strength
+                        zero_cross = i
+            if zero_strength > max_strength:
+                max_strength, max_axis = zero_strength, dim
+                max_ind = zero_cross
+        dims = self.dimensions.copy()
+        li = self.left_index.copy()
+        dims[max_axis] = max_ind
+        psg1 = ProtoSubgrid(self.flagged, li, dims)
+        li[max_axis] += max_ind
+        dims[max_axis] = self.dimensions[max_axis] - max_ind
+        offset = np.zeros(3)
+        offset[max_axis] = max_ind
+        psg2 = ProtoSubgrid(self.flagged, li, dims, offset)
+        return [psg1, psg2]
+
+    def __str__(self):
+        return "LI: (%s) DIMS: (%s)" % (self.left_index, self.dimensions)


diff -r b9d9aca343f57f3708754a0043c0a4f4f3b72b35 -r bc33e01c0c6e636e5a20b1dc64c53c246e750e42 yt/utilities/initial_conditions.py
--- /dev/null
+++ b/yt/utilities/initial_conditions.py
@@ -0,0 +1,80 @@
+"""
+Painting zones in a grid
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+
+class FluidOperator(object):
+    def apply(self, pf):
+        for g in pf.h.grids: self(g)
+
+class TopHatSphere(FluidOperator):
+    def __init__(self, radius, center, fields):
+        self.radius = radius
+        self.center = center
+        self.fields = fields
+        
+    def __call__(self, grid, sub_select = None):
+        r = np.zeros(grid.ActiveDimensions, dtype="float64")
+        for i, ax in enumerate("xyz"):
+            np.add(r, (grid[ax] - self.center[i])**2.0, r)
+        np.sqrt(r, r)
+        ind = (r <= self.radius)
+        if sub_select is not None:
+            ind &= sub_select
+        for field, val in self.fields.iteritems():
+            grid[field][ind] = val
+
+class CoredSphere(FluidOperator):
+    def __init__(self, core_radius, radius, center, fields):
+        self.radius = radius
+        self.center = center
+        self.fields = fields
+        self.core_radius = core_radius
+
+    def __call__(self, grid, sub_select = None):
+        r = np.zeros(grid.ActiveDimensions, dtype="float64")
+        r2 = self.radius**2
+        cr2 = self.core_radius**2
+        for i, ax in enumerate("xyz"):
+            np.add(r, (grid[ax] - self.center[i])**2.0, r)
+        np.maximum(r, cr2, r)
+        ind = (r <= r2)
+        if sub_select is not None:
+            ind &= sub_select
+        for field, (outer_val, inner_val) in self.fields.iteritems():
+            val = ((r[ind] - cr2) / (r2 - cr2))**0.5 * (outer_val - inner_val)
+            grid[field][ind] = val + inner_val
+
+class RandomFluctuation(FluidOperator):
+    def __init__(self, fields):
+        self.fields = fields
+
+    def __call__(self, grid, sub_select = None):
+        if sub_select is None:
+            sub_select = Ellipsis
+        for field, mag in self.fields.iteritems():
+            vals = grid[field][sub_select]
+            rc = 1.0 + (np.random.random(vals.shape) - 0.5) * mag
+            grid[field][sub_select] *= rc


diff -r b9d9aca343f57f3708754a0043c0a4f4f3b72b35 -r bc33e01c0c6e636e5a20b1dc64c53c246e750e42 yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -291,3 +291,25 @@
     # Return out unique values
     return best_dim, split, less_ids.view("bool"), greater_ids.view("bool")
 
+
+def grow_flagging_field(oofield):
+    cdef np.ndarray[np.uint8_t, ndim=3] ofield = oofield.astype("uint8")
+    cdef np.ndarray[np.uint8_t, ndim=3] nfield
+    nfield = np.zeros_like(ofield)
+    cdef int i, j, k, ni, nj, nk
+    cdef int oi, oj, ok
+    for ni in range(ofield.shape[0]):
+        for nj in range(ofield.shape[1]):
+            for nk in range(ofield.shape[2]):
+                for oi in range(3):
+                    i = ni + (oi - 1)
+                    if i < 0 or i >= ofield.shape[0]: continue
+                    for oj in range(3):
+                        j = nj + (oj - 1)
+                        if j < 0 or j >= ofield.shape[1]: continue
+                        for ok in range(3):
+                            k = nk + (ok - 1)
+                            if k < 0 or k >= ofield.shape[2]: continue
+                            if ofield[i, j, k] == 1:
+                                nfield[ni, nj, nk] = 1
+    return nfield.astype("bool")



https://bitbucket.org/yt_analysis/yt-3.0/changeset/ec777c11fece/
changeset:   ec777c11fece
branch:      yt
user:        MatthewTurk
date:        2012-11-08 21:44:32
summary:     Fixing tests for overdensity flagging
affected #:  1 file

diff -r bc33e01c0c6e636e5a20b1dc64c53c246e750e42 -r ec777c11fece4ea5133f2cf0535e851b7e491b63 yt/utilities/tests/test_flagging_methods.py
--- a/yt/utilities/tests/test_flagging_methods.py
+++ b/yt/utilities/tests/test_flagging_methods.py
@@ -9,4 +9,4 @@
 def test_over_density():
     od_flag = flagging_method_registry["overdensity"](0.75) 
     criterion = (pf.h.grids[0]["Density"] > 0.75)
-    assert( np.all( od_flag(pf, pf.h.grids[0]) == criterion) )
+    assert( np.all( od_flag(pf.h.grids[0]) == criterion) )



https://bitbucket.org/yt_analysis/yt-3.0/changeset/3c88ed17da43/
changeset:   3c88ed17da43
branch:      yt
user:        chummels
date:        2012-11-09 17:15:45
summary:     Adding exception for existence testing in enzo test suite.
affected #:  1 file

diff -r ec777c11fece4ea5133f2cf0535e851b7e491b63 -r 3c88ed17da43b06f6f4c947519f235fc8d508924 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -172,3 +172,11 @@
 
     def __str__(self):
         return "Must have A>=B>=C"
+
+class EnzoTestOutputFileNonExistent(YTException):
+    def __init__(self, testname):
+        self.testname = testname
+
+    def __str__(self):
+        return "Enzo test output file (OutputLog) not generated for: " + \
+            "'%s'" % (self.testname) + ".\nTest did not complete."
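
A hypothetical use from a test harness (the test name is illustrative):

    import os.path
    from yt.utilities.exceptions import EnzoTestOutputFileNonExistent

    testname = "DD0010/moving7_0010"
    if not os.path.exists("OutputLog"):
        raise EnzoTestOutputFileNonExistent(testname)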



https://bitbucket.org/yt_analysis/yt-3.0/changeset/426c98880ea9/
changeset:   426c98880ea9
branch:      yt
user:        sskory
date:        2012-11-08 00:30:44
summary:     Added tag hop callback for changeset f15825659f5a
affected #:  1 file

diff -r f15825659f5af3ce64aaad30062aff3603cbfb66 -r 426c98880ea9f68c3043347e7b5d2865c8d26b4c .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5152,3 +5152,4 @@
 0000000000000000000000000000000000000000 svn.993
 fff7118f00e25731ccf37cba3082b8fcb73cf90e svn.371
 0000000000000000000000000000000000000000 svn.371
+f15825659f5af3ce64aaad30062aff3603cbfb66 hop callback



https://bitbucket.org/yt_analysis/yt-3.0/changeset/b7d78d603839/
changeset:   b7d78d603839
branch:      yt
user:        sskory
date:        2012-11-09 22:08:57
summary:     Merging.
affected #:  19 files

diff -r 426c98880ea9f68c3043347e7b5d2865c8d26b4c -r b7d78d603839d1a51ac0ecb4f54c67552f05c9cc yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -84,6 +84,7 @@
     ValidateSpatial, \
     ValidateGridType, \
     add_field, \
+    add_grad, \
     derived_field
 
 from particle_trajectories import \


diff -r 426c98880ea9f68c3043347e7b5d2865c8d26b4c -r b7d78d603839d1a51ac0ecb4f54c67552f05c9cc yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -58,6 +58,66 @@
                 return function
             return create_function
         self[name] = DerivedField(name, function, **kwargs)
+        
+    def add_grad(self, field, **kwargs):
+        """
+        Creates the x, y, and z partial derivatives of a given field, plus
+        the gradient magnitude; the gradient field names are autogenerated.
+
+        """
+        sl = slice(2,None,None)
+        sr = slice(None,-2,None)
+        
+        def _gradx(f, data):
+            grad = data[field][sl,1:-1,1:-1] - data[field][sr,1:-1,1:-1]
+            grad /= 2.0*data["dx"].flat[0]
+            g = np.zeros(data[field].shape, dtype='float64')
+            g[1:-1,1:-1,1:-1] = grad
+            return g
+            
+        def _grady(f, data):
+            grad = data[field][1:-1,sl,1:-1] - data[field][1:-1,sr,1:-1]
+            grad /= 2.0*data["dy"].flat[0]
+            g = np.zeros(data[field].shape, dtype='float64')
+            g[1:-1,1:-1,1:-1] = grad
+            return g
+            
+        def _gradz(f, data):
+            grad = data[field][1:-1,1:-1,sl] - data[field][1:-1,1:-1,sr]
+            grad /= 2.0*data["dz"].flat[0]
+            g = np.zeros(data[field].shape, dtype='float64')
+            g[1:-1,1:-1,1:-1] = grad
+            return g
+        
+        d_kwargs = kwargs.copy()
+        if "display_name" in kwargs: del d_kwargs["display_name"]
+        
+        for ax in "xyz":
+            if "display_name" in kwargs:
+                disp_name = r"%s\_%s" % (kwargs["display_name"], ax)
+            else:
+                disp_name = r"\partial %s/\partial %s" % (field, ax)
+            name = "Grad_%s_%s" % (field, ax)
+            self[name] = DerivedField(name, function=eval('_grad%s' % ax),
+                         take_log=False, validators=[ValidateSpatial(1,[field])],
+                         display_name = disp_name, **d_kwargs)
+        
+        def _grad(f, data) :
+            a = np.power(data["Grad_%s_x" % field],2)
+            b = np.power(data["Grad_%s_y" % field],2)
+            c = np.power(data["Grad_%s_z" % field],2)
+            norm = np.sqrt(a+b+c)
+            return norm
+
+        if "display_name" in kwargs:
+            disp_name = kwargs["display_name"]
+        else:
+            disp_name = r"\Vert\nabla %s\Vert" % (field)   
+        name = "Grad_%s" % field           
+        self[name] = DerivedField(name, function=_grad, take_log=False,
+                                  display_name = disp_name, **d_kwargs)
+        mylog.info("Added new fields: Grad_%s_x, Grad_%s_y, Grad_%s_z, Grad_%s" \
+                   % (field, field, field, field))
 
     def has_key(self, key):
         # This gets used a lot
@@ -96,6 +156,7 @@
 
 FieldInfo = FieldInfoContainer()
 add_field = FieldInfo.add_field
+add_grad = FieldInfo.add_grad
 
 def derived_field(**kwargs):
     def inner_decorator(function):
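
Usage is a one-liner (a sketch assuming the loaded frontend defines a "Density" field):

    from yt.mods import add_grad

    # Registers Grad_Density_x, Grad_Density_y, Grad_Density_z plus the
    # magnitude field Grad_Density, following the naming scheme above.
    add_grad("Density")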


diff -r 426c98880ea9f68c3043347e7b5d2865c8d26b4c -r b7d78d603839d1a51ac0ecb4f54c67552f05c9cc yt/data_objects/hierarchy.py
--- a/yt/data_objects/hierarchy.py
+++ b/yt/data_objects/hierarchy.py
@@ -382,17 +382,19 @@
         """
         Prints out (stdout) relevant information about the simulation
         """
-        header = "%3s\t%6s\t%14s" % ("level","# grids", "# cells")
+        header = "%3s\t%6s\t%14s\t%14s" % ("level","# grids", "# cells",
+                                           "# cells^3")
         print header
         print "%s" % (len(header.expandtabs())*"-")
         for level in xrange(MAXLEVEL):
             if (self.level_stats['numgrids'][level]) == 0:
                 break
-            print "% 3i\t% 6i\t% 14i" % \
+            print "% 3i\t% 6i\t% 14i\t% 14i" % \
                   (level, self.level_stats['numgrids'][level],
-                   self.level_stats['numcells'][level])
+                   self.level_stats['numcells'][level],
+                   self.level_stats['numcells'][level]**(1./3))
             dx = self.select_grids(level)[0].dds[0]
-        print "-" * 28
+        print "-" * 46
         print "   \t% 6i\t% 14i" % (self.level_stats['numgrids'].sum(), self.level_stats['numcells'].sum())
         print "\n"
         try:


diff -r 426c98880ea9f68c3043347e7b5d2865c8d26b4c -r b7d78d603839d1a51ac0ecb4f54c67552f05c9cc yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -478,7 +478,7 @@
         self.parameters['TopGridRank'] = 3
         self.parameters['DomainLeftEdge'] = np.zeros(self.parameters['TopGridRank'])
         self.parameters['DomainRightEdge'] = np.ones(self.parameters['TopGridRank'])
-        self.parameters['Refineby'] = 2 # technically not the enzo default
+        self.parameters['RefineBy'] = 2 # technically not the enzo default
         self.parameters['StopCycle'] = 100000
         self.parameters['dtDataDump'] = 0.
         self.parameters['CycleSkipDataDump'] = 0.


diff -r 426c98880ea9f68c3043347e7b5d2865c8d26b4c -r b7d78d603839d1a51ac0ecb4f54c67552f05c9cc yt/frontends/stream/api.py
--- a/yt/frontends/stream/api.py
+++ b/yt/frontends/stream/api.py
@@ -29,7 +29,9 @@
       StreamHierarchy, \
       StreamStaticOutput, \
       StreamHandler, \
-      load_uniform_grid
+      load_uniform_grid, \
+      load_amr_grids, \
+      refine_amr
 
 from .fields import \
       KnownStreamFields, \


diff -r 426c98880ea9f68c3043347e7b5d2865c8d26b4c -r b7d78d603839d1a51ac0ecb4f54c67552f05c9cc yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -44,6 +44,8 @@
     decompose_array, get_psize
 from yt.utilities.definitions import \
     mpc_conversion, sec_conversion
+from yt.utilities.flagging_methods import \
+    FlaggingGrid
 
 from .fields import \
     StreamFieldInfo, \
@@ -395,3 +397,168 @@
     for unit in mpc_conversion.keys():
         spf.units[unit] = mpc_conversion[unit] * box_in_mpc
     return spf
+
+def load_amr_grids(grid_data, domain_dimensions, sim_unit_to_cm, bbox=None,
+                   sim_time=0.0, number_of_particles=0):
+    r"""Load a set of grids of data into yt as a
+    :class:`~yt.frontends.stream.data_structures.StreamHandler`.
+
+    This should allow a sequence of grids of varying resolution to be loaded
+    directly into yt and analyzed as any other data would be.  This comes with
+    several caveats:
+        * Units will be incorrect unless the data has already been converted to
+          cgs.
+        * Some functions may behave oddly, and parallelism will be
+          disappointing or non-existent in most cases.
+        * Particles may be difficult to integrate.
+        * No consistency checks are performed on the hierarchy.
+
+    Parameters
+    ----------
+    grid_data : list of dicts
+        This is a list of dicts.  Each dict must have entries "left_edge",
+        "right_edge", "dimensions", "level", and then any remaining entries are
+        assumed to be fields.  This will be modified in place and can't be
+        assumed to be static.
+    domain_dimensions : array_like
+        This is the domain dimensions of the grid
+    sim_unit_to_cm : float
+        Conversion factor from simulation units to centimeters
+    bbox : array_like (xdim:zdim, LE:RE), optional
+        Size of computational domain in units of sim_unit_to_cm
+    sim_time : float, optional
+        The simulation time in seconds
+    number_of_particles : int, optional
+        If particle fields are included, set this to the number of particles
+
+    Examples
+    --------
+
+    >>> grid_data = [
+    ...     dict(left_edge = [0.0, 0.0, 0.0],
+    ...          right_edge = [1.0, 1.0, 1.],
+    ...          level = 0,
+    ...          dimensions = [32, 32, 32]),
+    ...     dict(left_edge = [0.25, 0.25, 0.25],
+    ...          right_edge = [0.75, 0.75, 0.75],
+    ...          level = 1,
+    ...          dimensions = [32, 32, 32])
+    ... ]
+    ... 
+    >>> for g in grid_data:
+    ...     g["Density"] = np.random.random(g["dimensions"]) * 2**g["level"]
+    ...
+    >>> pf = load_amr_grids(grid_data, [32, 32, 32], 1.0)
+    """
+
+    domain_dimensions = np.array(domain_dimensions)
+    ngrids = len(grid_data)
+    if bbox is None:
+        bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], 'float64')
+    domain_left_edge = np.array(bbox[:, 0], 'float64')
+    domain_right_edge = np.array(bbox[:, 1], 'float64')
+    grid_levels = np.zeros((ngrids, 1), dtype='int32')
+    grid_left_edges = np.zeros((ngrids, 3), dtype="float32")
+    grid_right_edges = np.zeros((ngrids, 3), dtype="float32")
+    grid_dimensions = np.zeros((ngrids, 3), dtype="int32")
+    sfh = StreamDictFieldHandler()
+    for i, g in enumerate(grid_data):
+        grid_left_edges[i,:] = g.pop("left_edge")
+        grid_right_edges[i,:] = g.pop("right_edge")
+        grid_dimensions[i,:] = g.pop("dimensions")
+        grid_levels[i,:] = g.pop("level")
+        sfh[i] = g
+
+    handler = StreamHandler(
+        grid_left_edges,
+        grid_right_edges,
+        grid_dimensions,
+        grid_levels,
+        None, # parent_ids is none
+        number_of_particles*np.ones(ngrids, dtype='int64').reshape(ngrids,1),
+        np.zeros(ngrids).reshape((ngrids,1)),
+        sfh,
+    )
+
+    handler.name = "AMRGridData"
+    handler.domain_left_edge = domain_left_edge
+    handler.domain_right_edge = domain_right_edge
+    handler.refine_by = 2
+    handler.dimensionality = 3
+    handler.domain_dimensions = domain_dimensions
+    handler.simulation_time = sim_time
+    handler.cosmology_simulation = 0
+
+    spf = StreamStaticOutput(handler)
+    spf.units["cm"] = sim_unit_to_cm
+    spf.units['1'] = 1.0
+    spf.units["unitary"] = 1.0
+    box_in_mpc = sim_unit_to_cm / mpc_conversion['cm']
+    for unit in mpc_conversion.keys():
+        spf.units[unit] = mpc_conversion[unit] * box_in_mpc
+    return spf
+
+def refine_amr(base_pf, refinement_criteria, fluid_operators, max_level,
+               callback = None):
+    r"""Given a base parameter file, repeatedly apply refinement criteria and
+    fluid operators until a maximum level is reached.
+
+    Parameters
+    ----------
+    base_pf : StaticOutput
+        This is any static output.  It can also be a stream static output, for
+        instance as returned by load_uniform_grid.
+    refinement_criteria : list of :class:`~yt.utilities.flagging_methods.FlaggingMethod`
+        These criteria will be applied in sequence to identify cells that need
+        to be refined.
+    fluid_operators : list of :class:`~yt.utilities.initial_conditions.FluidOperator`
+        These fluid operators will be applied in sequence to all resulting
+        grids.
+    max_level : int
+        The maximum level to which the data will be refined
+    callback : function, optional
+        A function that will be called at the beginning of each refinement
+        cycle, with the current parameter file.
+
+    Examples
+    --------
+    >>> domain_dims = (32, 32, 32)
+    >>> data = np.zeros(domain_dims) + 0.25
+    >>> fo = [ic.CoredSphere(0.05, 0.3, [0.7,0.4,0.75], {"Density": (0.25, 100.0)})]
+    >>> rc = [fm.flagging_method_registry["overdensity"](8.0)]
+    >>> ug = load_uniform_grid({'Density': data}, domain_dims, 1.0)
+    >>> pf = refine_amr(ug, rc, fo, 5)
+    """
+    last_gc = base_pf.h.num_grids
+    cur_gc = -1
+    pf = base_pf    
+    while pf.h.max_level < max_level and last_gc != cur_gc:
+        mylog.info("Refining another level.  Current max level: %s",
+                  pf.h.max_level)
+        last_gc = pf.h.grids.size
+        for m in fluid_operators: m.apply(pf)
+        if callback is not None: callback(pf)
+        grid_data = []
+        for g in pf.h.grids:
+            gd = dict( left_edge = g.LeftEdge,
+                       right_edge = g.RightEdge,
+                       level = g.Level,
+                       dimensions = g.ActiveDimensions )
+            for field in pf.h.field_list:
+                gd[field] = g[field]
+            grid_data.append(gd)
+            if g.Level < pf.h.max_level: continue
+            fg = FlaggingGrid(g, refinement_criteria)
+            nsg = fg.find_subgrids()
+            for sg in nsg:
+                LE = sg.left_index * g.dds
+                dims = sg.dimensions * pf.refine_by
+                grid = pf.h.smoothed_covering_grid(g.Level + 1, LE, dims)
+                gd = dict(left_edge = LE, right_edge = grid.right_edge,
+                          level = g.Level + 1, dimensions = dims)
+                for field in pf.h.field_list:
+                    gd[field] = grid[field]
+                grid_data.append(gd)
+        pf = load_amr_grids(grid_data, pf.domain_dimensions, 1.0)
+        cur_gc = pf.h.num_grids
+    return pf


diff -r 426c98880ea9f68c3043347e7b5d2865c8d26b4c -r b7d78d603839d1a51ac0ecb4f54c67552f05c9cc yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -23,6 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+import __builtin__
 import time, types, signal, inspect, traceback, sys, pdb, os
 import contextlib
 import warnings, struct, subprocess
@@ -310,7 +311,7 @@
     maxval = max(maxval, 1)
     from yt.config import ytcfg
     if ytcfg.getboolean("yt", "suppressStreamLogging") or \
-       ytcfg.getboolean("yt", "ipython_notebook") or \
+       "__IPYTHON__" in dir(__builtin__) or \
        ytcfg.getboolean("yt", "__withintesting"):
         return DummyProgressBar()
     elif ytcfg.getboolean("yt", "__withinreason"):
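
The new guard works because IPython (at least of this era) injects __IPYTHON__ into __builtin__ at startup, so the check needs no yt configuration at all:

    import __builtin__
    # True inside an IPython session, False in a plain interpreter.
    print "__IPYTHON__" in dir(__builtin__)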


diff -r 426c98880ea9f68c3043347e7b5d2865c8d26b4c -r b7d78d603839d1a51ac0ecb4f54c67552f05c9cc yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -58,7 +58,7 @@
 from yt.data_objects.api import \
     BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \
     data_object_registry, \
-    derived_field, add_field, FieldInfo, \
+    derived_field, add_field, add_grad, FieldInfo, \
     ValidateParameter, ValidateDataField, ValidateProperty, \
     ValidateSpatial, ValidateGridType, \
     TimeSeriesData, AnalysisTask, analysis_task, \


diff -r 426c98880ea9f68c3043347e7b5d2865c8d26b4c -r b7d78d603839d1a51ac0ecb4f54c67552f05c9cc yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -26,7 +26,8 @@
 from yt.funcs import *
 from numpy.testing import assert_array_equal, assert_almost_equal, \
     assert_approx_equal, assert_array_almost_equal, assert_equal, \
-    assert_array_less, assert_string_equal, assert_array_almost_equal_nulp
+    assert_array_less, assert_string_equal, assert_array_almost_equal_nulp,\
+    assert_allclose
 
 def assert_rel_equal(a1, a2, decimals):
     # We have nan checks in here because occasionally we have fields that get


diff -r 426c98880ea9f68c3043347e7b5d2865c8d26b4c -r b7d78d603839d1a51ac0ecb4f54c67552f05c9cc yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -34,6 +34,7 @@
 from yt.testing import *
 from yt.config import ytcfg
 from yt.mods import *
+from yt.data_objects.static_output import StaticOutput
 import cPickle
 
 from yt.utilities.logger import disable_stream_logging
@@ -75,6 +76,8 @@
         ytcfg["yt","__withintesting"] = "True"
         AnswerTestingTest.result_storage = \
             self.result_storage = defaultdict(dict)
+        if options.compare_name == "SKIP":
+            options.compare_name = None
         if options.compare_name is not None:
             # Now we grab from our S3 store
             if options.compare_name == "latest":
@@ -132,6 +135,8 @@
 
 def can_run_pf(pf_fn):
     path = ytcfg.get("yt", "test_data_dir")
+    if isinstance(pf_fn, StaticOutput):
+        return AnswerTestingTest.result_storage is not None
     with temp_cwd(path):
         try:
             load(pf_fn)
@@ -141,26 +146,44 @@
 
 def data_dir_load(pf_fn):
     path = ytcfg.get("yt", "test_data_dir")
+    if isinstance(pf_fn, StaticOutput): return pf_fn
     with temp_cwd(path):
         pf = load(pf_fn)
         pf.h
         return pf
 
+def sim_dir_load(sim_fn, path = None, sim_type = "Enzo",
+                 find_outputs=False):
+    if path is None and not os.path.exists(sim_fn):
+        raise IOError
+    if os.path.exists(sim_fn) or not path:
+        path = "."
+    with temp_cwd(path):
+        return simulation(sim_fn, sim_type,
+                          find_outputs=find_outputs)
+
 class AnswerTestingTest(object):
     reference_storage = None
+    prefix = ""
     def __init__(self, pf_fn):
         self.pf = data_dir_load(pf_fn)
 
     def __call__(self):
         nv = self.run()
         if self.reference_storage is not None:
-            dd = self.reference_storage.get(str(self.pf))
+            dd = self.reference_storage.get(self.storage_name)
             if dd is None: raise YTNoOldAnswer()
             ov = dd[self.description]
             self.compare(nv, ov)
         else:
             ov = None
-        self.result_storage[str(self.pf)][self.description] = nv
+        self.result_storage[self.storage_name][self.description] = nv
+
+    @property
+    def storage_name(self):
+        if self.prefix != "":
+            return "%s_%s" % (self.prefix, self.pf)
+        return str(self.pf)
 
     def compare(self, new_result, old_result):
         raise RuntimeError
@@ -312,6 +335,22 @@
         for k in new_result:
             assert_equal(new_result[k], old_result[k])
 
+class VerifySimulationSameTest(AnswerTestingTest):
+    _type_name = "VerifySimulationSame"
+    _attrs = ()
+
+    def __init__(self, simulation_obj):
+        self.pf = simulation_obj
+
+    def run(self):
+        result = [ds.current_time for ds in self.pf]
+        return result
+
+    def compare(self, new_result, old_result):
+        assert_equal(len(new_result), len(old_result))
+        for i in range(len(new_result)):
+            assert_equal(new_result[i], old_result[i])
+        
 class GridHierarchyTest(AnswerTestingTest):
     _type_name = "GridHierarchy"
     _attrs = ()
@@ -353,6 +392,25 @@
         for newc, oldc in zip(new_result["children"], old_result["children"]):
             assert(newp == oldp)
 
+def requires_outputlog(path = ".", prefix = ""):
+    def ffalse(func):
+        return lambda: None
+    def ftrue(func):
+        @wraps(func)
+        def fyielder(*args, **kwargs):
+            with temp_cwd(path):
+                for t in func(*args, **kwargs):
+                    if isinstance(t, AnswerTestingTest):
+                        t.prefix = prefix
+                    yield t
+        return fyielder
+    if os.path.exists("OutputLog"):
+        return ftrue
+    with temp_cwd(path):
+        if os.path.exists("OutputLog"):
+            return ftrue
+    return ffalse
+
 def requires_pf(pf_fn, big_data = False):
     def ffalse(func):
         return lambda: None
@@ -394,3 +452,60 @@
                     yield PixelizedProjectionValuesTest(
                         pf_fn, axis, field, weight_field,
                         ds)
+
+def standard_small_simulation(pf_fn, fields):
+    if not can_run_pf(pf_fn): return
+    dso = [None]
+    yield GridHierarchyTest(pf_fn)
+    yield ParentageRelationshipsTest(pf_fn)
+    for field in fields:
+        yield GridValuesTest(pf_fn, field)
+        for axis in [0, 1, 2]:
+            for ds in dso:
+                for weight_field in [None, "Density"]:
+                    yield ProjectionValuesTest(
+                        pf_fn, axis, field, weight_field,
+                        ds)
+                yield FieldValuesTest(
+                        pf_fn, field, ds)
+                    
+class ShockTubeTest(object):
+    def __init__(self, data_file, solution_file, fields, 
+                 left_edges, right_edges, rtol, atol):
+        self.solution_file = solution_file
+        self.data_file = data_file
+        self.fields = fields
+        self.left_edges = left_edges
+        self.right_edges = right_edges
+        self.rtol = rtol
+        self.atol = atol
+
+    def __call__(self):
+        # Read in the pf
+        pf = load(self.data_file)  
+        exact = self.get_analytical_solution() 
+
+        ad = pf.h.all_data()
+        position = ad['x']
+        for k in self.fields:
+            field = ad[k]
+            for xmin, xmax in zip(self.left_edges, self.right_edges):
+                mask = (position >= xmin)*(position <= xmax)
+                exact_field = np.interp(position[mask], exact['pos'], exact[k]) 
+                # yield test vs analytical solution 
+                yield assert_allclose, field[mask], exact_field, \
+                    self.rtol, self.atol
+
+    def get_analytical_solution(self):
+        # Reads in from file 
+        pos, dens, vel, pres, inte = \
+                np.loadtxt(self.solution_file, unpack=True)
+        exact = {}
+        exact['pos'] = pos
+        exact['Density'] = dens
+        exact['x-velocity'] = vel
+        exact['Pressure'] = pres
+        exact['ThermalEnergy'] = inte
+        return exact
+
+
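
ShockTubeTest yields nose-style test tuples; driven by hand it looks roughly like this (file names and tolerances are illustrative):

    from yt.utilities.answer_testing.framework import ShockTubeTest

    test = ShockTubeTest("DD0001/data0001", "analytical.txt",
                         ["Density", "x-velocity", "Pressure"],
                         left_edges=[0.25], right_edges=[0.75],
                         rtol=1e-2, atol=1e-7)
    for check in test():
        func, args = check[0], check[1:]
        func(*args)   # assert_allclose(actual, desired, rtol, atol)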


diff -r 426c98880ea9f68c3043347e7b5d2865c8d26b4c -r b7d78d603839d1a51ac0ecb4f54c67552f05c9cc yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -172,3 +172,11 @@
 
     def __str__(self):
         return "Must have A>=B>=C"
+
+class EnzoTestOutputFileNonExistent(YTException):
+    def __init__(self, testname):
+        self.testname = testname
+
+    def __str__(self):
+        return "Enzo test output file (OutputLog) not generated for: " + \
+            "'%s'" % (self.testname) + ".\nTest did not complete."


diff -r 426c98880ea9f68c3043347e7b5d2865c8d26b4c -r b7d78d603839d1a51ac0ecb4f54c67552f05c9cc yt/utilities/flagging_methods.py
--- a/yt/utilities/flagging_methods.py
+++ b/yt/utilities/flagging_methods.py
@@ -24,15 +24,10 @@
 """
 
 import numpy as np # For modern purposes
+from yt.utilities.lib import grow_flagging_field
 
 flagging_method_registry = {}
 
-def flag_cells(grid, methods):
-    flagged = np.zeros(grid.ActiveDimensions, dtype="bool")
-    for method in methods:
-        flagged |= method(grid)
-    return flagged
-
 class FlaggingMethod(object):
     _skip_add = False
     class __metaclass__(type):
@@ -46,6 +41,144 @@
     def __init__(self, over_density):
         self.over_density = over_density
 
-    def __call__(self, pf, grid):
-        rho = grid["Density"] / (pf.refine_by**grid.Level)
+    def __call__(self, grid):
+        rho = grid["Density"] / (grid.pf.refine_by**grid.Level)
         return (rho > self.over_density)
+
+class FlaggingGrid(object):
+    def __init__(self, grid, methods):
+        self.grid = grid
+        flagged = np.zeros(grid.ActiveDimensions, dtype="bool")
+        for method in methods:
+            flagged |= method(self.grid)
+        self.flagged = grow_flagging_field(flagged)
+        self.subgrids = []
+        self.left_index = grid.get_global_startindex()
+        self.dimensions = grid.ActiveDimensions.copy()
+
+    def find_subgrids(self):
+        if not np.any(self.flagged): return []
+        psg = ProtoSubgrid(self.flagged, self.left_index, self.dimensions)
+        sgl = [psg]
+        index = 0
+        while index < len(sgl):
+            psg = sgl[index]
+            psg.shrink()
+            if psg.dimensions.prod() == 0:
+                sgl[index] = None
+                continue
+            while not psg.acceptable:
+                new_psgs = []
+                for i, dim in enumerate(np.argsort(psg.dimensions)[::-1]):
+                    new_psgs = psg.find_by_zero_signature(dim)
+                    if len(new_psgs) > 1:
+                        break
+                if len(new_psgs) <= 1:
+                    new_psgs = psg.find_by_second_derivative()
+                psg = new_psgs[0]
+                sgl[index] = psg 
+                sgl.extend(new_psgs[1:])
+                psg.shrink()
+            index += 1
+        return sgl
+
+
+# Much or most of this is directly translated from Enzo
+class ProtoSubgrid(object):
+
+    def __init__(self, flagged_base, left_index, dimensions, offset = (0,0,0)):
+        self.left_index = left_index.copy()
+        self.dimensions = dimensions.copy()
+        self.flagged = flagged_base[offset[0]:offset[0]+dimensions[0],
+                                    offset[1]:offset[1]+dimensions[1],
+                                    offset[2]:offset[2]+dimensions[2]]
+        self.compute_signatures()
+
+    def compute_signatures(self):
+        self.sigs = []
+        for dim in range(3):
+            d1 = (dim + 1) % 3
+            d2 = (dim == 0)
+            self.sigs.append(self.flagged.sum(axis=d1).sum(axis=d2))
+
+    @property
+    def acceptable(self):
+        return float(self.flagged.sum()) / self.flagged.size > 0.2
+
+    def shrink(self):
+        new_ind = []
+        for dim in range(3):
+            sig = self.sigs[dim]
+            new_start = 0
+            while sig[new_start] == 0:
+                new_start += 1
+            new_end = sig.size 
+            while sig[new_end - 1] == 0:
+                new_end -= 1
+            self.dimensions[dim] = new_end - new_start
+            self.left_index[dim] += new_start
+            new_ind.append((new_start, new_end))
+        self.flagged = self.flagged[new_ind[0][0]:new_ind[0][1],
+                                    new_ind[1][0]:new_ind[1][1],
+                                    new_ind[2][0]:new_ind[2][1]]
+        self.compute_signatures()
+
+    def find_by_zero_signature(self, dim):
+        sig = self.sigs[dim]
+        grid_ends = np.zeros((sig.size, 2))
+        ng = 0
+        i = 0
+        while i < sig.size:
+            if sig[i] != 0:
+                grid_ends[ng, 0] = i
+                while i < sig.size and sig[i] != 0:
+                    i += 1
+                grid_ends[ng, 1] = i - 1
+                ng += 1
+            i += 1
+        new_grids = []
+        for si, ei in grid_ends[:ng,:]:
+            li = self.left_index.copy()
+            dims = self.dimensions.copy()
+            li[dim] += si
+            dims[dim] = ei - si
+            offset = [0,0,0]
+            offset[dim] = si
+            new_grids.append(ProtoSubgrid(self.flagged, li, dims, offset))
+        return new_grids
+
+    def find_by_second_derivative(self):
+        max_strength = 0
+        max_axis = -1
+        max_ind = -1
+        for dim in range(3):
+            sig = self.sigs[dim]
+            sd = sig[:-2] - 2.0*sig[1:-1] + sig[2:]
+            grid_ends = np.zeros((sig.size, 2))
+            ng = 0
+            center = int((self.flagged.shape[dim] - 1) / 2)
+            strength = zero_strength = zero_cross = 0
+            for i in range(1, sig.size-2):
+                # Note that sd is offset by one
+                if sd[i-1] * sd[i] < 0:
+                    strength = np.abs(sd[i-1] - sd[i])
+                    if strength > zero_strength or \
+                       (strength == zero_strength and np.abs(center - i) < np.abs(zero_cross - i)):
+                        zero_strength = strength
+                        zero_cross = i
+            if zero_strength > max_strength:
+                max_strength = zero_strength
+                max_axis = dim
+                max_ind = zero_cross
+        dims = self.dimensions.copy()
+        li = self.left_index.copy()
+        dims[max_axis] = max_ind
+        psg1 = ProtoSubgrid(self.flagged, li, dims)
+        li[max_axis] += max_ind
+        dims[max_axis] = self.dimensions[max_axis] - max_ind
+        offset = np.zeros(3)
+        offset[max_axis] = max_ind
+        psg2 = ProtoSubgrid(self.flagged, li, dims, offset)
+        return [psg1, psg2]
+
+    def __str__(self):
+        return "LI: (%s) DIMS: (%s)" % (self.left_index, self.dimensions)


diff -r 426c98880ea9f68c3043347e7b5d2865c8d26b4c -r b7d78d603839d1a51ac0ecb4f54c67552f05c9cc yt/utilities/initial_conditions.py
--- /dev/null
+++ b/yt/utilities/initial_conditions.py
@@ -0,0 +1,80 @@
+"""
+Painting zones in a grid
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+
+class FluidOperator(object):
+    def apply(self, pf):
+        for g in pf.h.grids: self(g)
+
+class TopHatSphere(FluidOperator):
+    def __init__(self, radius, center, fields):
+        self.radius = radius
+        self.center = center
+        self.fields = fields
+        
+    def __call__(self, grid, sub_select = None):
+        r = np.zeros(grid.ActiveDimensions, dtype="float64")
+        for i, ax in enumerate("xyz"):
+            np.add(r, (grid[ax] - self.center[i])**2.0, r)
+        np.sqrt(r, r)
+        ind = (r <= self.radius)
+        if sub_select is not None:
+            ind &= sub_select
+        for field, val in self.fields.iteritems():
+            grid[field][ind] = val
+
+class CoredSphere(FluidOperator):
+    def __init__(self, core_radius, radius, center, fields):
+        self.radius = radius
+        self.center = center
+        self.fields = fields
+        self.core_radius = core_radius
+
+    def __call__(self, grid, sub_select = None):
+        r = np.zeros(grid.ActiveDimensions, dtype="float64")
+        r2 = self.radius**2
+        cr2 = self.core_radius**2
+        for i, ax in enumerate("xyz"):
+            np.add(r, (grid[ax] - self.center[i])**2.0, r)
+        np.maximum(r, cr2, r)
+        ind = (r <= r2)
+        if sub_select is not None:
+            ind &= sub_select
+        for field, (outer_val, inner_val) in self.fields.iteritems():
+            val = ((r[ind] - cr2) / (r2 - cr2))**0.5 * (outer_val - inner_val)
+            grid[field][ind] = val + inner_val
+
+class RandomFluctuation(FluidOperator):
+    def __init__(self, fields):
+        self.fields = fields
+
+    def __call__(self, grid, sub_select = None):
+        if sub_select is None:
+            sub_select = Ellipsis
+        for field, mag in self.fields.iteritems():
+            vals = grid[field][sub_select]
+            rc = 1.0 + (np.random.random(vals.shape) - 0.5) * mag
+            grid[field][sub_select] *= rc
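
A hypothetical usage sketch for these operators on a stream dataset (the field
values and geometry below are made up; load_uniform_grid is the stream
frontend loader used elsewhere in this changeset):

    import numpy as np
    from yt.frontends.stream.api import load_uniform_grid
    import yt.utilities.initial_conditions as ic

    # A uniform box seeded with a dense top-hat sphere plus 1% noise.
    data = {"Density": np.ones((64, 64, 64), dtype="float64")}
    pf = load_uniform_grid(data, [64, 64, 64], 1.0)
    ic.TopHatSphere(0.1, [0.5, 0.5, 0.5], {"Density": 10.0}).apply(pf)
    ic.RandomFluctuation({"Density": 0.01}).apply(pf)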


diff -r 426c98880ea9f68c3043347e7b5d2865c8d26b4c -r b7d78d603839d1a51ac0ecb4f54c67552f05c9cc yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -291,3 +291,25 @@
     # Return out unique values
     return best_dim, split, less_ids.view("bool"), greater_ids.view("bool")
 
+
+def grow_flagging_field(oofield):
+    cdef np.ndarray[np.uint8_t, ndim=3] ofield = oofield.astype("uint8")
+    cdef np.ndarray[np.uint8_t, ndim=3] nfield
+    nfield = np.zeros_like(ofield)
+    cdef int i, j, k, ni, nj, nk
+    cdef int oi, oj, ok
+    for ni in range(ofield.shape[0]):
+        for nj in range(ofield.shape[1]):
+            for nk in range(ofield.shape[2]):
+                for oi in range(3):
+                    i = ni + (oi - 1)
+                    if i < 0 or i >= ofield.shape[0]: continue
+                    for oj in range(3):
+                        j = nj + (oj - 1)
+                        if j < 0 or j >= ofield.shape[1]: continue
+                        for ok in range(3):
+                            k = nk + (ok - 1)
+                            if k < 0 or k >= ofield.shape[2]: continue
+                            if ofield[i, j, k] == 1:
+                                nfield[ni, nj, nk] = 1
+    return nfield.astype("bool")
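
The Cython loop amounts to a 3x3x3 binary dilation of the flagged mask,
clipped at the domain edges.  An equivalent (slower) pure-NumPy sketch, using
the fact that a cubic dilation separates into three 1-D passes:

    import numpy as np

    def grow_flagging_field_np(ofield):
        # A cell is flagged in the output if any cell in its 3x3x3
        # neighborhood is flagged in the input (no periodic wrap).
        n = np.asarray(ofield, dtype="bool").copy()
        for axis in range(3):
            grown = n.copy()
            lo = [slice(None)] * 3
            hi = [slice(None)] * 3
            lo[axis] = slice(None, -1)
            hi[axis] = slice(1, None)
            grown[tuple(lo)] |= n[tuple(hi)]  # neighbor at +1 on this axis
            grown[tuple(hi)] |= n[tuple(lo)]  # neighbor at -1 on this axis
            n = grown
        return n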


diff -r 426c98880ea9f68c3043347e7b5d2865c8d26b4c -r b7d78d603839d1a51ac0ecb4f54c67552f05c9cc yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -581,7 +581,9 @@
                     ncols, size = data.shape
             ncols = self.comm.allreduce(ncols, op=MPI.MAX)
             if ncols == 0:
-                    data = np.zeros(0, dtype=dtype) # This only works for
+                data = np.zeros(0, dtype=dtype) # This only works for
+            elif data is None:
+                data = np.zeros((ncols, 0), dtype=dtype)
             size = data.shape[-1]
             sizes = np.zeros(self.comm.size, dtype='int64')
             outsize = np.array(size, dtype='int64')
@@ -1058,3 +1060,49 @@
                 nextdim = (nextdim + 1) % 3
         return cuts
     
+class GroupOwnership(ParallelAnalysisInterface):
+    def __init__(self, items):
+        ParallelAnalysisInterface.__init__(self)
+        self.num_items = len(items)
+        self.items = items
+        assert(self.num_items >= self.comm.size)
+        self.owned = range(self.comm.size)
+        self.pointer = 0
+        if parallel_capable:
+            communication_system.push_with_ids([self.comm.rank])
+
+    def __del__(self):
+        if parallel_capable:
+            communication_system.pop()
+
+    def inc(self, n = -1):
+        old_item = self.item
+        if n == -1: n = self.comm.size
+        for i in range(n):
+            if self.pointer >= self.num_items - self.comm.size: break
+            self.owned[self.pointer % self.comm.size] += self.comm.size
+            self.pointer += 1
+        if self.item is not old_item:
+            self.switch()
+            
+    def dec(self, n = -1):
+        old_item = self.item
+        if n == -1: n = self.comm.size
+        for i in range(n):
+            if self.pointer == 0: break
+            self.owned[(self.pointer - 1) % self.comm.size] -= self.comm.size
+            self.pointer -= 1
+        if self.item is not old_item:
+            self.switch()
+
+    _last = None
+    @property
+    def item(self):
+        own = self.owned[self.comm.rank]
+        if self._last != own:
+            self._item = self.items[own]
+            self._last = own
+        return self._item
+
+    def switch(self):
+        pass
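
GroupOwnership round-robins ownership of a list of items across the MPI
ranks, with inc()/dec() advancing or rewinding every rank's owned item in
lockstep.  A hypothetical sketch (item names made up; it requires at least as
many items as ranks):

    from yt.utilities.parallel_tools.parallel_analysis_interface import \
        GroupOwnership

    items = ["output_%04i" % i for i in range(8)]
    go = GroupOwnership(items)
    print(go.item)   # rank r starts out owning items[r]
    go.inc()         # every rank advances by comm.size items
    print(go.item)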


diff -r 426c98880ea9f68c3043347e7b5d2865c8d26b4c -r b7d78d603839d1a51ac0ecb4f54c67552f05c9cc yt/utilities/tests/test_flagging_methods.py
--- a/yt/utilities/tests/test_flagging_methods.py
+++ b/yt/utilities/tests/test_flagging_methods.py
@@ -9,4 +9,4 @@
 def test_over_density():
     od_flag = flagging_method_registry["overdensity"](0.75) 
     criterion = (pf.h.grids[0]["Density"] > 0.75)
-    assert( np.all( od_flag(pf, pf.h.grids[0]) == criterion) )
+    assert( np.all( od_flag(pf.h.grids[0]) == criterion) )


diff -r 426c98880ea9f68c3043347e7b5d2865c8d26b4c -r b7d78d603839d1a51ac0ecb4f54c67552f05c9cc yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -407,8 +407,9 @@
                                self.data_source[item],
                                self.buff_size[0], self.buff_size[1],
                                self.bounds).transpose()
-        self[item] = buff
-        return buff
+        ia = ImageArray(buff, info=self._get_info(item))
+        self[item] = ia
+        return ia 
 
 
 class OffAxisProjectionFixedResolutionBuffer(FixedResolutionBuffer):
@@ -430,7 +431,8 @@
                                    weight=ds.weight_field, volume=ds.volume,
                                    no_ghost=ds.no_ghost, interpolated=ds.interpolated,
                                    north_vector=ds.north_vector)
-        self[item] = buff.swapaxes(0,1)
-        return buff
+        ia = ImageArray(buff.swapaxes(0,1), info=self._get_info(item))
+        self[item] = ia
+        return ia 
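
With this change, pixelized buffers come back as ImageArray instances that
carry the metadata assembled by _get_info(), rather than bare ndarrays.  A
hypothetical sketch (assumes pf is an already-loaded parameter file):

    from yt.visualization.fixed_resolution import FixedResolutionBuffer

    slc = pf.h.slice(0, 0.5)
    frb = FixedResolutionBuffer(slc, (0.0, 1.0, 0.0, 1.0), (512, 512))
    dens = frb["Density"]
    print(type(dens))         # now an ImageArray, not a plain ndarray
    print(dens.info.keys())   # provenance gathered by _get_info()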
 
 


diff -r 426c98880ea9f68c3043347e7b5d2865c8d26b4c -r b7d78d603839d1a51ac0ecb4f54c67552f05c9cc yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -633,6 +633,9 @@
         y0, y1 = plot.ylim
         xx0, xx1 = plot._axes.get_xlim()
         yy0, yy1 = plot._axes.get_ylim()
+
+        extent = [xx0,xx1,yy0,yy1]
+
         plot._axes.hold(True)
 
         px_index = x_dict[plot.data.axis]
@@ -662,7 +665,7 @@
                              (x0, x1, y0, y1), 0).transpose()
             buff = np.maximum(temp, buff)
         self.rv = plot._axes.contour(buff, len(self.clumps)+1,
-                                     **self.plot_args)
+                                     extent=extent,**self.plot_args)
         plot._axes.hold(False)
 
 class ArrowCallback(PlotCallback):
@@ -1170,41 +1173,59 @@
           'kev': 1e-12 * 7.6e-8 / 6.03,
           'mev': 1e-15 * 7.6e-8 / 6.03,
           }
+    _bbox_dict = {'boxstyle': 'square,pad=0.6', 'fc': 'white', 'ec': 'black', 'alpha': 1.0}
 
-    def __init__(self, x, y, units=None, format="{time:.3G} {units}", **kwargs):
+    def __init__(self, x, y, units=None, format="{time:.3G} {units}", normalized=False, 
+                 bbox_dict=None, **kwargs):
         """ 
-        annotate_timestamp(x, y, units=None, format="{time:.3G} {units}", **kwargs)
+        annotate_timestamp(x, y, units=None, format="{time:.3G} {units}",
+                           normalized=False, bbox_dict=None, **kwargs)
 
         Adds the current time to the plot at point given by *x* and *y*.  If *units* 
        is given ('s', 'ms', 'ns', etc), it will convert the time to this basis.  If 
         *units* is None, it will attempt to figure out the correct value by which to 
         scale.  The *format* keyword is a template string that will be evaluated and 
-        displayed on the plot.  All other *kwargs* will be passed to the text() 
-        method on the plot axes.  See matplotlib's text() functions for more 
-        information.
+        displayed on the plot.  If *normalized* is True, *x* and *y* are interpreted 
+        as normalized plot coordinates (0,0 is lower-left and 1,1 is upper-right); 
+        otherwise *x* and *y* are assumed to be in plot coordinates. The *bbox_dict* 
+        is an optional dict of arguments for the bbox that frames the timestamp, see 
+        matplotlib's text annotation guide for more details. All other *kwargs* will 
+        be passed to the text() method on the plot axes.  See matplotlib's text() 
+        functions for more information.
         """
         self.x = x
         self.y = y
         self.format = format
         self.units = units
+        self.normalized = normalized
+        if bbox_dict is not None:
+            self.bbox_dict = bbox_dict
+        else:
+            self.bbox_dict = self._bbox_dict
         self.kwargs = {'color': 'w'}
         self.kwargs.update(kwargs)
 
     def __call__(self, plot):
         if self.units is None:
-            t = plot.data.pf.current_time
+            t = plot.data.pf.current_time * plot.data.pf['Time']
             scale_keys = ['as', 'fs', 'ps', 'ns', 'us', 'ms', 's']
             self.units = 's'
             for k in scale_keys:
                 if t < self._time_conv[k]:
                     break
                 self.units = k
-        t = plot.data.pf.current_time / self._time_conv[self.units.lower()]
+        t = plot.data.pf.current_time * plot.data.pf['Time'] 
+        t /= self._time_conv[self.units.lower()]
         if self.units == 'us':
             self.units = '$\\mu s$'
         s = self.format.format(time=t, units=self.units)
         plot._axes.hold(True)
-        plot._axes.text(self.x, self.y, s, **self.kwargs)
+        if self.normalized:
+            plot._axes.text(self.x, self.y, s, horizontalalignment='center',
+                            verticalalignment='center', 
+                            transform = plot._axes.transAxes, bbox=self.bbox_dict)
+        else:
+            plot._axes.text(self.x, self.y, s, bbox=self.bbox_dict, **self.kwargs)
         plot._axes.hold(False)
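
A hypothetical sketch of the new timestamp options (dataset name made up):

    from yt.mods import load, SlicePlot

    pf = load("DD0030/output_0030")
    p = SlicePlot(pf, "z", "Density")
    # Centered text in normalized axes coordinates, framed by a custom bbox.
    p.annotate_timestamp(0.05, 0.95, normalized=True,
                         bbox_dict={'boxstyle': 'round,pad=0.4',
                                    'fc': 'white', 'alpha': 0.8})
    p.save()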
 
 


diff -r 426c98880ea9f68c3043347e7b5d2865c8d26b4c -r b7d78d603839d1a51ac0ecb4f54c67552f05c9cc yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -60,6 +60,10 @@
     axis_labels
 from yt.utilities.math_utils import \
     ortho_find
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+    GroupOwnership
+from yt.data_objects.time_series import \
+    TimeSeriesData
 
 def invalidate_data(f):
     @wraps(f)
@@ -209,7 +213,7 @@
     _vector_info = None
     _frb = None
     def __init__(self, data_source, bounds, buff_size=(800,800), antialias=True, 
-                 periodic=True, origin='center-window', oblique=False):
+                 periodic=True, origin='center-window', oblique=False, fontsize=15):
         r"""
         PlotWindow(data_source, bounds, buff_size=(800,800), antialias = True)
         
@@ -239,6 +243,10 @@
             rendering is used during data deposition.
 
         """
+        if not hasattr(self, "pf"):
+            self.pf = data_source.pf
+            ts = self._initialize_dataset(self.pf) 
+            self.ts = ts
         self._initfinished = False
         self.center = None
         self.plots = {}
@@ -249,11 +257,40 @@
         self.antialias = True
         self.set_window(bounds) # this automatically updates the data and plot
         self.origin = origin
+        self.fontsize = fontsize
         if self.data_source.center is not None and oblique == False:
             center = [self.data_source.center[i] for i in range(len(self.data_source.center)) if i != self.data_source.axis]
             self.set_center(center)
         self._initfinished = True
 
+    def _initialize_dataset(self, ts):
+        if not isinstance(ts, TimeSeriesData):
+            if not iterable(ts): ts = [ts]
+            ts = TimeSeriesData(ts)
+        return ts
+
+    def __iter__(self):
+        for pf in self.ts:
+            mylog.warning("Switching to %s", pf)
+            self._switch_pf(pf)
+            yield self
+
+    def piter(self, *args, **kwargs):
+        for pf in self.ts.piter(*args, **kwargs):
+            self._switch_pf(pf)
+            yield self
+
+    def _switch_pf(self, new_pf):
+        ds = self.data_source
+        name = ds._type_name
+        kwargs = dict((n, getattr(ds, n)) for n in ds._con_args)
+        new_ds = getattr(new_pf.h, name)(**kwargs)
+        self.pf = new_pf
+        self.data_source = new_ds
+        self._data_valid = self._plot_valid = False
+        self._recreate_frb()
+        self._setup_plots()
+
     def __getitem__(self, item):
         return self.plots[item]
 
@@ -273,7 +310,6 @@
             self._frb._get_data_source_fields()
         else:
             for key in old_fields: self._frb[key]
-        self.pf = self._frb.pf
         self._data_valid = True
         
     def _setup_plots(self):
@@ -767,8 +803,10 @@
                 labels = [r'$\rm{Image\/x'+axes_unit_label+'}$',
                           r'$\rm{Image\/y'+axes_unit_label+'}$']
 
-            self.plots[f].axes.set_xlabel(labels[0])
-            self.plots[f].axes.set_ylabel(labels[1])
+            self.plots[f].axes.set_xlabel(labels[0],fontsize=self.fontsize)
+            self.plots[f].axes.set_ylabel(labels[1],fontsize=self.fontsize)
+
+            self.plots[f].axes.tick_params(labelsize=self.fontsize)
 
             field_name = self.data_source.pf.field_info[f].display_name
 
@@ -792,7 +830,9 @@
                     raise YTCannotParseUnitDisplayName(f, md['units'],str(err))
                 label = field_name+r'$\/\/('+md['units']+r')$'
 
-            self.plots[f].cb.set_label(label)
+            self.plots[f].cb.set_label(label,fontsize=self.fontsize)
+
+            self.plots[f].cb.ax.tick_params(labelsize=self.fontsize)
 
             self.run_callbacks(f)
 
@@ -919,7 +959,7 @@
     _frb_generator = FixedResolutionBuffer
 
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
-                 origin='center-window'):
+                 origin='center-window', fontsize=15):
         r"""Creates a slice plot from a parameter file
         
         Given a pf object, an axis to slice along, and a field name
@@ -975,6 +1015,8 @@
              to the bottom-left hand corner of the simulation domain, 'center-domain',
              corresponding the center of the simulation domain, or 'center-window' for 
              the center of the plot window.
+        fontsize : integer
+             The size of the fonts for the axis, colorbar, and tick labels.
              
         Examples
         --------
@@ -986,8 +1028,12 @@
         >>> p.save('sliceplot')
         
         """
+        # This will handle time series data and controllers
+        ts = self._initialize_dataset(pf) 
+        self.ts = ts
+        pf = self.pf = ts[0]
         axis = fix_axis(axis)
-        (bounds,center) = GetBoundsAndCenter(axis, center, width, pf)
+        (bounds, center) = GetBoundsAndCenter(axis, center, width, pf)
         slc = pf.h.slice(axis, center[axis], fields=fields)
         PWViewerMPL.__init__(self, slc, bounds, origin=origin)
         self.set_axes_unit(axes_unit)
@@ -997,7 +1043,7 @@
     _frb_generator = FixedResolutionBuffer
 
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
-                 weight_field=None, max_level=None, origin='center-window'):
+                 weight_field=None, max_level=None, origin='center-window', fontsize=15):
         r"""Creates a projection plot from a parameter file
         
         Given a pf object, an axis to project along, and a field name
@@ -1057,6 +1103,8 @@
             The name of the weighting field.  Set to None for no weight.
         max_level: int
             The maximum level to project to.
+        fontsize : integer
+             The size of the fonts for the axis, colorbar, and tick labels.
         
         Examples
         --------
@@ -1068,8 +1116,11 @@
         >>> p.save('sliceplot')
         
         """
+        ts = self._initialize_dataset(pf) 
+        self.ts = ts
+        pf = self.pf = ts[0]
         axis = fix_axis(axis)
-        (bounds,center) = GetBoundsAndCenter(axis,center,width,pf)
+        (bounds, center) = GetBoundsAndCenter(axis, center, width, pf)
         proj = pf.h.proj(axis,fields,weight_field=weight_field,max_level=max_level,center=center)
         PWViewerMPL.__init__(self,proj,bounds,origin=origin)
         self.set_axes_unit(axes_unit)
@@ -1079,7 +1130,7 @@
     _frb_generator = ObliqueFixedResolutionBuffer
 
     def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
-                 axes_unit=None, north_vector=None):
+                 axes_unit=None, north_vector=None, fontsize=15):
         r"""Creates an off axis slice plot from a parameter file
 
         Given a pf object, a normal vector defining a slicing plane, and
@@ -1116,7 +1167,8 @@
             A vector defining the 'up' direction in the plot.  This
             option sets the orientation of the slicing plane.  If not
             set, an arbitrary grid-aligned north-vector is chosen.
-
+        fontsize : integer
+             The size of the fonts for the axis, colorbar, and tick labels.
         """
         (bounds,center_rot) = GetOffAxisBoundsAndCenter(normal,center,width,pf)
         cutting = pf.h.cutting(normal,center,fields=fields,north_vector=north_vector)
@@ -1155,7 +1207,7 @@
     def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
                  depth=(1,'unitary'), axes_unit=None, weight_field=None, 
                  max_level=None, north_vector=None, volume=None, no_ghost=False, 
-                 le=None, re=None, interpolated=False):
+                 le=None, re=None, interpolated=False, fontsize=15):
         r"""Creates an off axis projection plot from a parameter file
 
         Given a pf object, a normal vector to project along, and
@@ -1491,4 +1543,3 @@
                                       norm = norm, vmin = self.zmin, 
                                       vmax = self.zmax, cmap = cmap)
         self.image.axes.ticklabel_format(scilimits=(-4,3))
-
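
Taken together, these plot_window.py changes let a plot window step through a
time series: _initialize_dataset() wraps a single parameter file in a
TimeSeriesData, and piter() re-creates the data source and buffer for each
output before yielding the plot back.  A hypothetical sketch (filenames made
up; assumes TimeSeriesData.from_filenames is available):

    from yt.mods import SlicePlot, TimeSeriesData

    ts = TimeSeriesData.from_filenames(["DD0030/output_0030",
                                        "DD0040/output_0040"])
    p = SlicePlot(ts, "x", "Density", fontsize=12)
    for p in p.piter():   # switches pf and rebuilds the FRB each step
        p.save()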



https://bitbucket.org/yt_analysis/yt-3.0/changeset/bf4068e9ce0d/
changeset:   bf4068e9ce0d
branch:      yt
user:        sskory
date:        2012-11-09 22:09:27
summary:     Making sure the workgroups are deleted correctly.
affected #:  1 file

diff -r b7d78d603839d1a51ac0ecb4f54c67552f05c9cc -r bf4068e9ce0d8b85a1d813eff9329428425907ee yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -279,12 +279,12 @@
     ranks = None
     available_ranks = None
     tasks = None
-    workgroups = []
     def __init__(self):
         self.comm = communication_system.communicators[-1]
         self.size = self.comm.size
         self.ranks = range(self.size)
         self.available_ranks = range(self.size)
+        self.workgroups = []
     
     def add_workgroup(self, size=None, ranks=None, name=None):
         if size is None:
@@ -296,7 +296,7 @@
             ranks = [self.available_ranks.pop(0) for i in range(size)]
         # Default name to the workgroup number.
         if name is None: 
-            name = string(len(workgroups))
+            name = str(len(self.workgroups))
         group = self.comm.comm.Get_group().Incl(ranks)
         new_comm = self.comm.comm.Create(group)
         if self.comm.rank in ranks:
@@ -304,16 +304,20 @@
         self.workgroups.append(Workgroup(len(ranks), ranks, new_comm, name))
     
     def free_workgroup(self, workgroup):
+        # If you want to actually delete the workgroup you will need to
+        # pop it out of the self.workgroups list so you don't have references
+        # that are left dangling, e.g. see free_all() below.
         for i in workgroup.ranks:
             if self.comm.rank == i:
                 communication_system.communicators.pop()
             self.available_ranks.append(i) 
-        del workgroup
         self.available_ranks.sort()
 
     def free_all(self):
         for wg in self.workgroups:
             self.free_workgroup(wg)
+        for i in range(len(self.workgroups)):
+            self.workgroups.pop(0)
 
     @classmethod
     def from_sizes(cls, sizes):
@@ -581,9 +585,7 @@
                     ncols, size = data.shape
             ncols = self.comm.allreduce(ncols, op=MPI.MAX)
             if ncols == 0:
-                data = np.zeros(0, dtype=dtype) # This only works for
-            elif data is None:
-                data = np.zeros((ncols, 0), dtype=dtype)
+                    data = np.zeros(0, dtype=dtype) # This only works for
             size = data.shape[-1]
             sizes = np.zeros(self.comm.size, dtype='int64')
             outsize = np.array(size, dtype='int64')
@@ -1060,49 +1062,3 @@
                 nextdim = (nextdim + 1) % 3
         return cuts
     
-class GroupOwnership(ParallelAnalysisInterface):
-    def __init__(self, items):
-        ParallelAnalysisInterface.__init__(self)
-        self.num_items = len(items)
-        self.items = items
-        assert(self.num_items >= self.comm.size)
-        self.owned = range(self.comm.size)
-        self.pointer = 0
-        if parallel_capable:
-            communication_system.push_with_ids([self.comm.rank])
-
-    def __del__(self):
-        if parallel_capable:
-            communication_system.pop()
-
-    def inc(self, n = -1):
-        old_item = self.item
-        if n == -1: n = self.comm.size
-        for i in range(n):
-            if self.pointer >= self.num_items - self.comm.size: break
-            self.owned[self.pointer % self.comm.size] += self.comm.size
-            self.pointer += 1
-        if self.item is not old_item:
-            self.switch()
-            
-    def dec(self, n = -1):
-        old_item = self.item
-        if n == -1: n = self.comm.size
-        for i in range(n):
-            if self.pointer == 0: break
-            self.owned[(self.pointer - 1) % self.comm.size] -= self.comm.size
-            self.pointer -= 1
-        if self.item is not old_item:
-            self.switch()
-
-    _last = None
-    @property
-    def item(self):
-        own = self.owned[self.comm.rank]
-        if self._last != own:
-            self._item = self.items[own]
-            self._last = own
-        return self._item
-
-    def switch(self):
-        pass



https://bitbucket.org/yt_analysis/yt-3.0/changeset/fb1152e1af1b/
changeset:   fb1152e1af1b
branch:      yt
user:        sskory
date:        2012-11-09 22:11:18
summary:     Removed tag hop callback
affected #:  1 file

diff -r bf4068e9ce0d8b85a1d813eff9329428425907ee -r fb1152e1af1b2cecb2b31abb92e173e8fa413df3 .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5153,3 +5153,5 @@
 fff7118f00e25731ccf37cba3082b8fcb73cf90e svn.371
 0000000000000000000000000000000000000000 svn.371
 f15825659f5af3ce64aaad30062aff3603cbfb66 hop callback
+0000000000000000000000000000000000000000 hop callback
+0000000000000000000000000000000000000000 hop callback



https://bitbucket.org/yt_analysis/yt-3.0/changeset/bcf34c16c801/
changeset:   bcf34c16c801
branch:      yt
user:        sskory
date:        2012-11-09 22:17:45
summary:     Somehow my merge removed the GroupOwnership class.
affected #:  1 file

diff -r fb1152e1af1b2cecb2b31abb92e173e8fa413df3 -r bcf34c16c801cb3d62496984ec34e2a7e89e77c7 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -1062,3 +1062,49 @@
                 nextdim = (nextdim + 1) % 3
         return cuts
     
+class GroupOwnership(ParallelAnalysisInterface):
+    def __init__(self, items):
+        ParallelAnalysisInterface.__init__(self)
+        self.num_items = len(items)
+        self.items = items
+        assert(self.num_items >= self.comm.size)
+        self.owned = range(self.comm.size)
+        self.pointer = 0
+        if parallel_capable:
+            communication_system.push_with_ids([self.comm.rank])
+
+    def __del__(self):
+        if parallel_capable:
+            communication_system.pop()
+
+    def inc(self, n = -1):
+        old_item = self.item
+        if n == -1: n = self.comm.size
+        for i in range(n):
+            if self.pointer >= self.num_items - self.comm.size: break
+            self.owned[self.pointer % self.comm.size] += self.comm.size
+            self.pointer += 1
+        if self.item is not old_item:
+            self.switch()
+            
+    def dec(self, n = -1):
+        old_item = self.item
+        if n == -1: n = self.comm.size
+        for i in range(n):
+            if self.pointer == 0: break
+            self.owned[(self.pointer - 1) % self.comm.size] -= self.comm.size
+            self.pointer -= 1
+        if self.item is not old_item:
+            self.switch()
+
+    _last = None
+    @property
+    def item(self):
+        own = self.owned[self.comm.rank]
+        if self._last != own:
+            self._item = self.items[own]
+            self._last = own
+        return self._item
+
+    def switch(self):
+        pass



https://bitbucket.org/yt_analysis/yt-3.0/changeset/5f6163709ed8/
changeset:   5f6163709ed8
branch:      yt
user:        sskory
date:        2012-11-09 22:19:27
summary:     Something else the merge deleted from parallel_analysis_interface.
affected #:  1 file

diff -r bcf34c16c801cb3d62496984ec34e2a7e89e77c7 -r 5f6163709ed86827de0b2db599113dba8f1b1e96 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -585,7 +585,9 @@
                     ncols, size = data.shape
             ncols = self.comm.allreduce(ncols, op=MPI.MAX)
             if ncols == 0:
-                    data = np.zeros(0, dtype=dtype) # This only works for
+                data = np.zeros(0, dtype=dtype) # This only works for
+            elif data is None:
+                data = np.zeros((ncols, 0), dtype=dtype)
             size = data.shape[-1]
             sizes = np.zeros(self.comm.size, dtype='int64')
             outsize = np.array(size, dtype='int64')



https://bitbucket.org/yt_analysis/yt-3.0/changeset/269d51fb1aa7/
changeset:   269d51fb1aa7
branch:      yt
user:        samskillman
date:        2012-11-09 20:12:10
summary:     Attempting to add local storage of results.
affected #:  1 file

diff -r 5c4bcee82cc5fc730aa3c8d478db82eae801f6a9 -r 269d51fb1aa78b291b05fe2a144ccf0bcffc3956 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -36,6 +36,7 @@
 from yt.mods import *
 from yt.data_objects.static_output import StaticOutput
 import cPickle
+import shelve
 
 from yt.utilities.logger import disable_stream_logging
 from yt.utilities.command_line import get_yt_version
@@ -61,6 +62,8 @@
             help="The name we'll call this set of tests")
         parser.add_option("--answer-store", dest="store_results",
             default=False, action="store_true")
+        parser.add_option("--local-store", dest="store_local_results",
+            default=False)
 
     def configure(self, options, conf):
         super(AnswerTesting, self).configure(options, conf)
@@ -82,35 +85,33 @@
             # Now we grab from our S3 store
             if options.compare_name == "latest":
                 options.compare_name = _latest
+        if options.store_local_results:
             AnswerTestingTest.reference_storage = \
-                AnswerTestOpener(options.compare_name)
+                self.storage = AnswerTestLocalStorage(options.compare_name)
+        else:
+            AnswerTestingTest.reference_storage = \
+                self.storage = AnswerTestCloudStorage(options.compare_name)
+
         self.answer_name = options.this_name
         self.store_results = options.store_results
+        self.store_local_results = options.store_local_results
         global run_big_data
         run_big_data = options.big_data
 
     def finalize(self, result):
-        # This is where we dump our result storage up to Amazon, if we are able
-        # to.
         if self.store_results is False: return
-        import boto
-        from boto.s3.key import Key
-        c = boto.connect_s3()
-        bucket = c.get_bucket("yt-answer-tests")
-        for pf_name in self.result_storage:
-            rs = cPickle.dumps(self.result_storage[pf_name])
-            tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name)) 
-            if tk is not None: tk.delete()
-            k = Key(bucket)
-            k.key = "%s_%s" % (self.answer_name, pf_name)
-            k.set_contents_from_string(rs)
-            k.set_acl("public-read")
+        self.storage.dump(self.result_storage, result)        
 
-class AnswerTestOpener(object):
+class AnswerTestStorage(object):
     def __init__(self, reference_name):
         self.reference_name = reference_name
         self.cache = {}
+    def dump(self, result_storage, result):
+        pass
+    def get(self, pf_name, default=None):
+        pass
 
+class AnswerTestCloudStorage(AnswerTestStorage):
     def get(self, pf_name, default = None):
         if pf_name in self.cache: return self.cache[pf_name]
         url = _url_path % (self.reference_name, pf_name)
@@ -126,6 +127,49 @@
         self.cache[pf_name] = rv
         return rv
 
+    def dump(self, result_storage, result):
+        # This is where we dump our result storage up to Amazon, if we are able
+        # to.
+        import boto
+        from boto.s3.key import Key
+        c = boto.connect_s3()
+        bucket = c.get_bucket("yt-answer-tests")
+        for pf_name in result_storage:
+            rs = cPickle.dumps(result_storage[pf_name])
+            tk = bucket.get_key("%s_%s" % (self.reference_name, pf_name)) 
+            if tk is not None: tk.delete()
+            k = Key(bucket)
+            k.key = "%s_%s" % (self.reference_name, pf_name)
+            k.set_contents_from_string(rs)
+            k.set_acl("public-read")
+
+class AnswerTestLocalStorage(AnswerTestStorage):
+    def __init__(self, reference_name):
+        self.reference_name = reference_name
+        self.cache = {}
+
+    def dump(self, result_storage, result):
+        # Store data using shelve
+        ds = shelve.open(self.reference_name, protocol=-1)
+        for pf_name in result_storage:
+            answer_name = "%s_%s" % (self.reference_name, pf_name)
+            if answer_name in ds:
+                mylog.info("Overwriting %s", answer_name)
+            ds[answer_name] = result_storage[pf_name]
+        ds.close()
+
+    def get(self, pf_name, default=None):
+        # Read data using shelve
+        answer_name = "%s_%s" % (self.reference_name, pf_name)
+        ds = shelve.open(self.reference_name, protocol=-1)
+        try:
+            result = ds[answer_name]
+        except KeyError:
+            result = default
+        ds.close()
+        return result
+
 @contextlib.contextmanager
 def temp_cwd(cwd):
     oldcwd = os.getcwd()
@@ -164,6 +208,7 @@
 
 class AnswerTestingTest(object):
     reference_storage = None
+    result_storage = None
     prefix = ""
     def __init__(self, pf_fn):
         self.pf = data_dir_load(pf_fn)
@@ -172,7 +217,7 @@
         nv = self.run()
         if self.reference_storage is not None:
             dd = self.reference_storage.get(self.storage_name)
-            if dd is None: raise YTNoOldAnswer()
+            if dd is None: raise YTNoOldAnswer(self.storage_name)
             ov = dd[self.description]
             self.compare(nv, ov)
         else:
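
For reference, the shelve pattern AnswerTestLocalStorage relies on (file and
key names here are made up; answers live under "<reference_name>_<pf_name>"
keys):

    import shelve

    ds = shelve.open("local-answers", protocol=-1)
    ds["local-answers_galaxy0030"] = {"projection": [1.0, 2.0]}
    ds.close()

    ds = shelve.open("local-answers", protocol=-1)
    print(ds.get("local-answers_galaxy0030"))
    ds.close()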



https://bitbucket.org/yt_analysis/yt-3.0/changeset/578f64273856/
changeset:   578f64273856
branch:      yt
user:        samskillman
date:        2012-11-09 20:12:32
summary:     Merging
affected #:  14 files

diff -r 269d51fb1aa78b291b05fe2a144ccf0bcffc3956 -r 578f64273856f2e6dba89a1c393d375875347313 yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -84,6 +84,7 @@
     ValidateSpatial, \
     ValidateGridType, \
     add_field, \
+    add_grad, \
     derived_field
 
 from particle_trajectories import \


diff -r 269d51fb1aa78b291b05fe2a144ccf0bcffc3956 -r 578f64273856f2e6dba89a1c393d375875347313 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -58,6 +58,66 @@
                 return function
             return create_function
         self[name] = DerivedField(name, function, **kwargs)
+        
+    def add_grad(self, field, **kwargs):
+        """
+        Creates the partial derivative of a given field. This function will
+        autogenerate the names of the gradient fields.
+
+        """
+        sl = slice(2,None,None)
+        sr = slice(None,-2,None)
+        
+        def _gradx(f, data):
+            grad = data[field][sl,1:-1,1:-1] - data[field][sr,1:-1,1:-1]
+            grad /= 2.0*data["dx"].flat[0]
+            g = np.zeros(data[field].shape, dtype='float64')
+            g[1:-1,1:-1,1:-1] = grad
+            return g
+            
+        def _grady(f, data):
+            grad = data[field][1:-1,sl,1:-1] - data[field][1:-1,sr,1:-1]
+            grad /= 2.0*data["dy"].flat[0]
+            g = np.zeros(data[field].shape, dtype='float64')
+            g[1:-1,1:-1,1:-1] = grad
+            return g
+            
+        def _gradz(f, data):
+            grad = data[field][1:-1,1:-1,sl] - data[field][1:-1,1:-1,sr]
+            grad /= 2.0*data["dz"].flat[0]
+            g = np.zeros(data[field].shape, dtype='float64')
+            g[1:-1,1:-1,1:-1] = grad
+            return g
+        
+        d_kwargs = kwargs.copy()
+        if "display_name" in kwargs: del d_kwargs["display_name"]
+        
+        for ax in "xyz":
+            if "display_name" in kwargs:
+                disp_name = r"%s\_%s" % (kwargs["display_name"], ax)
+            else:
+                disp_name = r"\partial %s/\partial %s" % (field, ax)
+            name = "Grad_%s_%s" % (field, ax)
+            self[name] = DerivedField(name, function=eval('_grad%s' % ax),
+                         take_log=False, validators=[ValidateSpatial(1,[field])],
+                         display_name = disp_name, **d_kwargs)
+        
+        def _grad(f, data) :
+            a = np.power(data["Grad_%s_x" % field],2)
+            b = np.power(data["Grad_%s_y" % field],2)
+            c = np.power(data["Grad_%s_z" % field],2)
+            norm = np.sqrt(a+b+c)
+            return norm
+
+        if "display_name" in kwargs:
+            disp_name = kwargs["display_name"]
+        else:
+            disp_name = r"\Vert\nabla %s\Vert" % (field)   
+        name = "Grad_%s" % field           
+        self[name] = DerivedField(name, function=_grad, take_log=False,
+                                  display_name = disp_name, **d_kwargs)
+        mylog.info("Added new fields: Grad_%s_x, Grad_%s_y, Grad_%s_z, Grad_%s" \
+                   % (field, field, field, field))
 
     def has_key(self, key):
         # This gets used a lot
@@ -96,6 +156,7 @@
 
 FieldInfo = FieldInfoContainer()
 add_field = FieldInfo.add_field
+add_grad = FieldInfo.add_grad
 
 def derived_field(**kwargs):
     def inner_decorator(function):
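
A hypothetical usage sketch (dataset name made up): add_grad registers
Grad_<field>_x/y/z from centered differences plus the magnitude field
Grad_<field>:

    from yt.mods import load, add_grad

    add_grad("Density")
    pf = load("DD0030/output_0030")
    dd = pf.h.all_data()
    print(dd["Grad_Density"].max())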


diff -r 269d51fb1aa78b291b05fe2a144ccf0bcffc3956 -r 578f64273856f2e6dba89a1c393d375875347313 yt/data_objects/hierarchy.py
--- a/yt/data_objects/hierarchy.py
+++ b/yt/data_objects/hierarchy.py
@@ -382,17 +382,19 @@
         """
         Prints out (stdout) relevant information about the simulation
         """
-        header = "%3s\t%6s\t%14s" % ("level","# grids", "# cells")
+        header = "%3s\t%6s\t%14s\t%14s" % ("level","# grids", "# cells",
+                                           "# cells^3")
         print header
         print "%s" % (len(header.expandtabs())*"-")
         for level in xrange(MAXLEVEL):
             if (self.level_stats['numgrids'][level]) == 0:
                 break
-            print "% 3i\t% 6i\t% 14i" % \
+            print "% 3i\t% 6i\t% 14i\t% 14i" % \
                   (level, self.level_stats['numgrids'][level],
-                   self.level_stats['numcells'][level])
+                   self.level_stats['numcells'][level],
+                   self.level_stats['numcells'][level]**(1./3))
             dx = self.select_grids(level)[0].dds[0]
-        print "-" * 28
+        print "-" * 46
         print "   \t% 6i\t% 14i" % (self.level_stats['numgrids'].sum(), self.level_stats['numcells'].sum())
         print "\n"
         try:


diff -r 269d51fb1aa78b291b05fe2a144ccf0bcffc3956 -r 578f64273856f2e6dba89a1c393d375875347313 yt/frontends/stream/api.py
--- a/yt/frontends/stream/api.py
+++ b/yt/frontends/stream/api.py
@@ -30,7 +30,8 @@
       StreamStaticOutput, \
       StreamHandler, \
       load_uniform_grid, \
-      load_amr_grids
+      load_amr_grids, \
+      refine_amr
 
 from .fields import \
       KnownStreamFields, \


diff -r 269d51fb1aa78b291b05fe2a144ccf0bcffc3956 -r 578f64273856f2e6dba89a1c393d375875347313 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -44,6 +44,8 @@
     decompose_array, get_psize
 from yt.utilities.definitions import \
     mpc_conversion, sec_conversion
+from yt.utilities.flagging_methods import \
+    FlaggingGrid
 
 from .fields import \
     StreamFieldInfo, \
@@ -495,3 +497,68 @@
     for unit in mpc_conversion.keys():
         spf.units[unit] = mpc_conversion[unit] * box_in_mpc
     return spf
+
+def refine_amr(base_pf, refinement_criteria, fluid_operators, max_level,
+               callback = None):
+    r"""Given a base parameter file, repeatedly apply refinement criteria and
+    fluid operators until a maximum level is reached.
+
+    Parameters
+    ----------
+    base_pf : StaticOutput
+        This is any static output.  It can also be a stream static output, for
+        instance as returned by load_uniform_data.
+    refinement_criteria : list of :class:`~yt.utilities.flagging_methods.FlaggingMethod`
+        These criteria will be applied in sequence to identify cells that need
+        to be refined.
+    fluid_operators : list of :class:`~yt.utilities.initial_conditions.FluidOperator`
+        These fluid operators will be applied in sequence to all resulting
+        grids.
+    max_level : int
+        The maximum level to which the data will be refined
+    callback : function, optional
+        A function that will be called at the beginning of each refinement
+        cycle, with the current parameter file.
+
+    Examples
+    --------
+    >>> domain_dims = (32, 32, 32)
+    >>> data = np.zeros(domain_dims) + 0.25
+    >>> fo = [ic.CoredSphere(0.05, 0.3, [0.7,0.4,0.75], {"Density": (0.25, 100.0)})]
+    >>> rc = [fm.flagging_method_registry["overdensity"](8.0)]
+    >>> ug = load_uniform_grid({'Density': data}, domain_dims, 1.0)
+    >>> pf = refine_amr(ug, rc, fo, 5)
+    """
+    last_gc = base_pf.h.num_grids
+    cur_gc = -1
+    pf = base_pf    
+    while pf.h.max_level < max_level and last_gc != cur_gc:
+        mylog.info("Refining another level.  Current max level: %s",
+                  pf.h.max_level)
+        last_gc = pf.h.grids.size
+        for m in fluid_operators: m.apply(pf)
+        if callback is not None: callback(pf)
+        grid_data = []
+        for g in pf.h.grids:
+            gd = dict( left_edge = g.LeftEdge,
+                       right_edge = g.RightEdge,
+                       level = g.Level,
+                       dimensions = g.ActiveDimensions )
+            for field in pf.h.field_list:
+                gd[field] = g[field]
+            grid_data.append(gd)
+            if g.Level < pf.h.max_level: continue
+            fg = FlaggingGrid(g, refinement_criteria)
+            nsg = fg.find_subgrids()
+            for sg in nsg:
+                LE = sg.left_index * g.dds
+                dims = sg.dimensions * pf.refine_by
+                grid = pf.h.smoothed_covering_grid(g.Level + 1, LE, dims)
+                gd = dict(left_edge = LE, right_edge = grid.right_edge,
+                          level = g.Level + 1, dimensions = dims)
+                for field in pf.h.field_list:
+                    gd[field] = grid[field]
+                grid_data.append(gd)
+        pf = load_amr_grids(grid_data, pf.domain_dimensions, 1.0)
+        cur_gc = pf.h.num_grids
+    return pf
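
The docstring example above assumes the ic and fm aliases; a fuller,
self-contained version of the same sketch:

    import numpy as np
    from yt.frontends.stream.api import load_uniform_grid, refine_amr
    import yt.utilities.initial_conditions as ic
    import yt.utilities.flagging_methods as fm

    domain_dims = (32, 32, 32)
    data = np.zeros(domain_dims) + 0.25
    ug = load_uniform_grid({'Density': data}, domain_dims, 1.0)
    fo = [ic.CoredSphere(0.05, 0.3, [0.7, 0.4, 0.75],
                         {"Density": (0.25, 100.0)})]
    rc = [fm.flagging_method_registry["overdensity"](8.0)]
    pf = refine_amr(ug, rc, fo, 5)
    pf.h.print_stats()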


diff -r 269d51fb1aa78b291b05fe2a144ccf0bcffc3956 -r 578f64273856f2e6dba89a1c393d375875347313 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -58,7 +58,7 @@
 from yt.data_objects.api import \
     BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \
     data_object_registry, \
-    derived_field, add_field, FieldInfo, \
+    derived_field, add_field, add_grad, FieldInfo, \
     ValidateParameter, ValidateDataField, ValidateProperty, \
     ValidateSpatial, ValidateGridType, \
     TimeSeriesData, AnalysisTask, analysis_task, \


diff -r 269d51fb1aa78b291b05fe2a144ccf0bcffc3956 -r 578f64273856f2e6dba89a1c393d375875347313 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -498,6 +498,22 @@
                         pf_fn, axis, field, weight_field,
                         ds)
 
+def standard_small_simulation(pf_fn, fields):
+    if not can_run_pf(pf_fn): return
+    dso = [None]
+    yield GridHierarchyTest(pf_fn)
+    yield ParentageRelationshipsTest(pf_fn)
+    for field in fields:
+        yield GridValuesTest(pf_fn, field)
+        for axis in [0, 1, 2]:
+            for ds in dso:
+                for weight_field in [None, "Density"]:
+                    yield ProjectionValuesTest(
+                        pf_fn, axis, field, weight_field,
+                        ds)
+                yield FieldValuesTest(
+                        pf_fn, field, ds)
+                    
 class ShockTubeTest(object):
     def __init__(self, data_file, solution_file, fields, 
                  left_edges, right_edges, rtol, atol):


diff -r 269d51fb1aa78b291b05fe2a144ccf0bcffc3956 -r 578f64273856f2e6dba89a1c393d375875347313 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -172,3 +172,11 @@
 
     def __str__(self):
         return "Must have A>=B>=C"
+
+class EnzoTestOutputFileNonExistent(YTException):
+    def __init__(self, testname):
+        self.testname = testname
+
+    def __str__(self):
+        return "Enzo test output file (OutputLog) not generated for: " + \
+            "'%s'" % (self.testname) + ".\nTest did not complete."


diff -r 269d51fb1aa78b291b05fe2a144ccf0bcffc3956 -r 578f64273856f2e6dba89a1c393d375875347313 yt/utilities/flagging_methods.py
--- a/yt/utilities/flagging_methods.py
+++ b/yt/utilities/flagging_methods.py
@@ -24,15 +24,10 @@
 """
 
 import numpy as np # For modern purposes
+from yt.utilities.lib import grow_flagging_field
 
 flagging_method_registry = {}
 
-def flag_cells(grid, methods):
-    flagged = np.zeros(grid.ActiveDimensions, dtype="bool")
-    for method in methods:
-        flagged |= method(grid)
-    return flagged
-
 class FlaggingMethod(object):
     _skip_add = False
     class __metaclass__(type):
@@ -46,6 +41,144 @@
     def __init__(self, over_density):
         self.over_density = over_density
 
-    def __call__(self, pf, grid):
-        rho = grid["Density"] / (pf.refine_by**grid.Level)
+    def __call__(self, grid):
+        rho = grid["Density"] / (grid.pf.refine_by**grid.Level)
         return (rho > self.over_density)
+
+class FlaggingGrid(object):
+    def __init__(self, grid, methods):
+        self.grid = grid
+        flagged = np.zeros(grid.ActiveDimensions, dtype="bool")
+        for method in methods:
+            flagged |= method(self.grid)
+        self.flagged = grow_flagging_field(flagged)
+        self.subgrids = []
+        self.left_index = grid.get_global_startindex()
+        self.dimensions = grid.ActiveDimensions.copy()
+
+    def find_subgrids(self):
+        if not np.any(self.flagged): return []
+        psg = ProtoSubgrid(self.flagged, self.left_index, self.dimensions)
+        sgl = [psg]
+        index = 0
+        while index < len(sgl):
+            psg = sgl[index]
+            psg.shrink()
+            if psg.dimensions.prod() == 0:
+                sgl[index] = None
+                continue
+            while not psg.acceptable:
+                new_psgs = []
+                for i, dim in enumerate(np.argsort(psg.dimensions)[::-1]):
+                    new_psgs = psg.find_by_zero_signature(dim)
+                    if len(new_psgs) > 1:
+                        break
+                if len(new_psgs) <= 1:
+                    new_psgs = psg.find_by_second_derivative()
+                psg = new_psgs[0]
+                sgl[index] = psg 
+                sgl.extend(new_psgs[1:])
+                psg.shrink()
+            index += 1
+        return sgl
+
+
+# Much or most of this is directly translated from Enzo
+class ProtoSubgrid(object):
+
+    def __init__(self, flagged_base, left_index, dimensions, offset = (0,0,0)):
+        self.left_index = left_index.copy()
+        self.dimensions = dimensions.copy()
+        self.flagged = flagged_base[offset[0]:offset[0]+dimensions[0],
+                                    offset[1]:offset[1]+dimensions[1],
+                                    offset[2]:offset[2]+dimensions[2]]
+        self.compute_signatures()
+
+    def compute_signatures(self):
+        self.sigs = []
+        for dim in range(3):
+            d1 = (dim + 1) % 3
+            d2 = (dim == 0)
+            self.sigs.append(self.flagged.sum(axis=d1).sum(axis=d2))
+
+    @property
+    def acceptable(self):
+        return float(self.flagged.sum()) / self.flagged.size > 0.2
+
+    def shrink(self):
+        new_ind = []
+        for dim in range(3):
+            sig = self.sigs[dim]
+            new_start = 0
+            while sig[new_start] == 0:
+                new_start += 1
+            new_end = sig.size 
+            while sig[new_end - 1] == 0:
+                new_end -= 1
+            self.dimensions[dim] = new_end - new_start
+            self.left_index[dim] += new_start
+            new_ind.append((new_start, new_end))
+        self.flagged = self.flagged[new_ind[0][0]:new_ind[0][1],
+                                    new_ind[1][0]:new_ind[1][1],
+                                    new_ind[2][0]:new_ind[2][1]]
+        self.compute_signatures()
+
+    def find_by_zero_signature(self, dim):
+        sig = self.sigs[dim]
+        grid_ends = np.zeros((sig.size, 2))
+        ng = 0
+        i = 0
+        while i < sig.size:
+            if sig[i] != 0:
+                grid_ends[ng, 0] = i
+                while i < sig.size and sig[i] != 0:
+                    i += 1
+                grid_ends[ng, 1] = i - 1
+                ng += 1
+            i += 1
+        new_grids = []
+        for si, ei in grid_ends[:ng,:]:
+            li = self.left_index.copy()
+            dims = self.dimensions.copy()
+            li[dim] += si
+            dims[dim] = ei - si
+            offset = [0,0,0]
+            offset[dim] = si
+            new_grids.append(ProtoSubgrid(self.flagged, li, dims, offset))
+        return new_grids
+
+    def find_by_second_derivative(self):
+        max_strength = 0
+        max_axis = -1
+        max_ind = -1
+        for dim in range(3):
+            sig = self.sigs[dim]
+            sd = sig[:-2] - 2.0*sig[1:-1] + sig[2:]
+            grid_ends = np.zeros((sig.size, 2))
+            ng = 0
+            center = int((self.flagged.shape[dim] - 1) / 2)
+            strength = zero_strength = zero_cross = 0
+            for i in range(1, sig.size-2):
+                # Note that sd is offset by one
+                if sd[i-1] * sd[i] < 0:
+                    strength = np.abs(sd[i-1] - sd[i])
+                    if strength > zero_strength or \
+                       (strength == zero_strength and np.abs(center - i) < np.abs(zero_cross - i)):
+                        zero_strength = strength
+                        zero_cross = i
+            if zero_strength > max_strength:
+                max_strength = zero_strength
+                max_axis = dim
+                max_ind = zero_cross
+        dims = self.dimensions.copy()
+        li = self.left_index.copy()
+        dims[max_axis] = max_ind
+        psg1 = ProtoSubgrid(self.flagged, li, dims)
+        li[max_axis] += max_ind
+        dims[max_axis] = self.dimensions[max_axis] - max_ind
+        offset = np.zeros(3)
+        offset[max_axis] = max_ind
+        psg2 = ProtoSubgrid(self.flagged, li, dims, offset)
+        return [psg1, psg2]
+
+    def __str__(self):
+        return "LI: (%s) DIMS: (%s)" % (self.left_index, self.dimensions)


diff -r 269d51fb1aa78b291b05fe2a144ccf0bcffc3956 -r 578f64273856f2e6dba89a1c393d375875347313 yt/utilities/initial_conditions.py
--- /dev/null
+++ b/yt/utilities/initial_conditions.py
@@ -0,0 +1,80 @@
+"""
+Painting zones in a grid
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+
+class FluidOperator(object):
+    def apply(self, pf):
+        for g in pf.h.grids: self(g)
+
+class TopHatSphere(FluidOperator):
+    def __init__(self, radius, center, fields):
+        self.radius = radius
+        self.center = center
+        self.fields = fields
+        
+    def __call__(self, grid, sub_select = None):
+        r = np.zeros(grid.ActiveDimensions, dtype="float64")
+        for i, ax in enumerate("xyz"):
+            np.add(r, (grid[ax] - self.center[i])**2.0, r)
+        np.sqrt(r, r)
+        ind = (r <= self.radius)
+        if sub_select is not None:
+            ind &= sub_select
+        for field, val in self.fields.iteritems():
+            grid[field][ind] = val
+
+class CoredSphere(FluidOperator):
+    def __init__(self, core_radius, radius, center, fields):
+        self.radius = radius
+        self.center = center
+        self.fields = fields
+        self.core_radius = core_radius
+
+    def __call__(self, grid, sub_select = None):
+        r = np.zeros(grid.ActiveDimensions, dtype="float64")
+        r2 = self.radius**2
+        cr2 = self.core_radius**2
+        for i, ax in enumerate("xyz"):
+            np.add(r, (grid[ax] - self.center[i])**2.0, r)
+        np.maximum(r, cr2, r)
+        ind = (r <= r2)
+        if sub_select is not None:
+            ind &= sub_select
+        for field, (outer_val, inner_val) in self.fields.iteritems():
+            val = ((r[ind] - cr2) / (r2 - cr2))**0.5 * (outer_val - inner_val)
+            grid[field][ind] = val + inner_val
+
+class RandomFluctuation(FluidOperator):
+    def __init__(self, fields):
+        self.fields = fields
+
+    def __call__(self, grid, sub_select = None):
+        if sub_select is None:
+            sub_select = Ellipsis
+        for field, mag in self.fields.iteritems():
+            vals = grid[field][sub_select]
+            rc = 1.0 + (np.random.random(vals.shape) - 0.5) * mag
+            grid[field][sub_select] *= rc
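
The operators in this new file paint initial conditions onto every grid
of a dataset. A self-contained sketch of what TopHatSphere does to one
grid, using a plain dict of NumPy arrays as a stand-in (real yt grids
carry far more machinery than this):

    import numpy as np

    # Stand-in 16^3 grid: coordinate fields plus a fluid field.
    N = 16
    x, y, z = np.mgrid[0:1:N*1j, 0:1:N*1j, 0:1:N*1j]
    grid = {"x": x, "y": y, "z": z, "Density": np.ones((N, N, N))}

    # What TopHatSphere(0.25, (0.5, 0.5, 0.5), {"Density": 10.0}) does:
    r = np.sqrt((grid["x"] - 0.5)**2 + (grid["y"] - 0.5)**2 +
                (grid["z"] - 0.5)**2)
    ind = r <= 0.25
    grid["Density"][ind] = 10.0
    print(ind.sum(), grid["Density"].max())

RandomFluctuation leans on the NumPy idiom that indexing with Ellipsis
selects the whole array, so the same code path handles the masked and
unmasked cases. (The iteritems() calls are Python 2; under Python 3 they
would be items().)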


diff -r 269d51fb1aa78b291b05fe2a144ccf0bcffc3956 -r 578f64273856f2e6dba89a1c393d375875347313 yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -291,3 +291,25 @@
     # Return out unique values
     return best_dim, split, less_ids.view("bool"), greater_ids.view("bool")
 
+
+def grow_flagging_field(oofield):
+    cdef np.ndarray[np.uint8_t, ndim=3] ofield = oofield.astype("uint8")
+    cdef np.ndarray[np.uint8_t, ndim=3] nfield
+    nfield = np.zeros_like(ofield)
+    cdef int i, j, k, ni, nj, nk
+    cdef int oi, oj, ok
+    for ni in range(ofield.shape[0]):
+        for nj in range(ofield.shape[1]):
+            for nk in range(ofield.shape[2]):
+                for oi in range(3):
+                    i = ni + (oi - 1)
+                    if i < 0 or i >= ofield.shape[0]: continue
+                    for oj in range(3):
+                        j = nj + (oj - 1)
+                        if j < 0 or j >= ofield.shape[1]: continue
+                        for ok in range(3):
+                            k = nk + (ok - 1)
+                            if k < 0 or k >= ofield.shape[2]: continue
+                            if ofield[i, j, k] == 1:
+                                nfield[ni, nj, nk] = 1
+    return nfield.astype("bool")
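
grow_flagging_field expands the flagged region by one cell in every
direction, diagonals included. The triple offset loop is equivalent to a
binary dilation with a full 3x3x3 stencil; here is a sketch using
scipy.ndimage, where SciPy is an assumption on my part, since the
committed Cython version avoids that dependency:

    import numpy as np
    from scipy import ndimage

    field = np.zeros((8, 8, 8), dtype=bool)
    field[4, 4, 4] = True

    # One-cell dilation with the full 27-point stencil mirrors the
    # oi/oj/ok offset loops in grow_flagging_field.
    grown = ndimage.binary_dilation(field, structure=np.ones((3, 3, 3)))
    print(field.sum(), grown.sum())   # 1 27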


diff -r 269d51fb1aa78b291b05fe2a144ccf0bcffc3956 -r 578f64273856f2e6dba89a1c393d375875347313 yt/utilities/tests/test_flagging_methods.py
--- a/yt/utilities/tests/test_flagging_methods.py
+++ b/yt/utilities/tests/test_flagging_methods.py
@@ -9,4 +9,4 @@
 def test_over_density():
     od_flag = flagging_method_registry["overdensity"](0.75) 
     criterion = (pf.h.grids[0]["Density"] > 0.75)
-    assert( np.all( od_flag(pf, pf.h.grids[0]) == criterion) )
+    assert( np.all( od_flag(pf.h.grids[0]) == criterion) )


diff -r 269d51fb1aa78b291b05fe2a144ccf0bcffc3956 -r 578f64273856f2e6dba89a1c393d375875347313 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -633,6 +633,9 @@
         y0, y1 = plot.ylim
         xx0, xx1 = plot._axes.get_xlim()
         yy0, yy1 = plot._axes.get_ylim()
+
+        extent = [xx0,xx1,yy0,yy1]
+
         plot._axes.hold(True)
 
         px_index = x_dict[plot.data.axis]
@@ -662,7 +665,7 @@
                              (x0, x1, y0, y1), 0).transpose()
             buff = np.maximum(temp, buff)
         self.rv = plot._axes.contour(buff, len(self.clumps)+1,
-                                     **self.plot_args)
+                                     extent=extent,**self.plot_args)
         plot._axes.hold(False)
 
 class ArrowCallback(PlotCallback):
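
Passing extent here maps the fixed-resolution buffer's pixel indices onto
the axes' data coordinates, so the clump contours land on top of the
underlying image instead of in pixel units. A self-contained matplotlib
sketch of the idea (array contents and output file name are illustrative):

    import numpy as np
    import matplotlib
    matplotlib.use("Agg")
    import matplotlib.pyplot as plt

    buff = np.random.random((64, 64))
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.imshow(buff, extent=[0.2, 0.8, 0.2, 0.8], origin="lower")
    # Without extent, contour draws in pixel units (0..63) and misses
    # the image; with it, both share the same data coordinates.
    ax.contour(buff, 5, extent=[0.2, 0.8, 0.2, 0.8])
    fig.savefig("contour_extent.png")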


diff -r 269d51fb1aa78b291b05fe2a144ccf0bcffc3956 -r 578f64273856f2e6dba89a1c393d375875347313 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -213,7 +213,7 @@
     _vector_info = None
     _frb = None
     def __init__(self, data_source, bounds, buff_size=(800,800), antialias=True, 
-                 periodic=True, origin='center-window', oblique=False):
+                 periodic=True, origin='center-window', oblique=False, fontsize=15):
         r"""
         PlotWindow(data_source, bounds, buff_size=(800,800), antialias = True)
         
@@ -257,6 +257,7 @@
         self.antialias = True
         self.set_window(bounds) # this automatically updates the data and plot
         self.origin = origin
+        self.fontsize = fontsize
         if self.data_source.center is not None and oblique == False:
             center = [self.data_source.center[i] for i in range(len(self.data_source.center)) if i != self.data_source.axis]
             self.set_center(center)
@@ -802,8 +803,10 @@
                 labels = [r'$\rm{Image\/x'+axes_unit_label+'}$',
                           r'$\rm{Image\/y'+axes_unit_label+'}$']
 
-            self.plots[f].axes.set_xlabel(labels[0])
-            self.plots[f].axes.set_ylabel(labels[1])
+            self.plots[f].axes.set_xlabel(labels[0],fontsize=self.fontsize)
+            self.plots[f].axes.set_ylabel(labels[1],fontsize=self.fontsize)
+
+            self.plots[f].axes.tick_params(labelsize=self.fontsize)
 
             field_name = self.data_source.pf.field_info[f].display_name
 
@@ -827,7 +830,9 @@
                     raise YTCannotParseUnitDisplayName(f, md['units'],str(err))
                 label = field_name+r'$\/\/('+md['units']+r')$'
 
-            self.plots[f].cb.set_label(label)
+            self.plots[f].cb.set_label(label,fontsize=self.fontsize)
+
+            self.plots[f].cb.ax.tick_params(labelsize=self.fontsize)
 
             self.run_callbacks(f)
 
@@ -954,7 +959,7 @@
     _frb_generator = FixedResolutionBuffer
 
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
-                 origin='center-window'):
+                 origin='center-window', fontsize=15):
         r"""Creates a slice plot from a parameter file
         
         Given a pf object, an axis to slice along, and a field name
@@ -1010,6 +1015,8 @@
              to the bottom-left hand corner of the simulation domain, 'center-domain',
              corresponding the center of the simulation domain, or 'center-window' for 
              the center of the plot window.
+        fontsize : integer
+             The size of the fonts for the axis, colorbar, and tick labels.
              
         Examples
         --------
@@ -1036,7 +1043,7 @@
     _frb_generator = FixedResolutionBuffer
 
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
-                 weight_field=None, max_level=None, origin='center-window'):
+                 weight_field=None, max_level=None, origin='center-window', fontsize=15):
         r"""Creates a projection plot from a parameter file
         
         Given a pf object, an axis to project along, and a field name
@@ -1096,6 +1103,8 @@
             The name of the weighting field.  Set to None for no weight.
         max_level: int
             The maximum level to project to.
+        fontsize : integer
+             The size of the fonts for the axis, colorbar, and tick labels.
         
         Examples
         --------
@@ -1121,7 +1130,7 @@
     _frb_generator = ObliqueFixedResolutionBuffer
 
     def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
-                 axes_unit=None, north_vector=None):
+                 axes_unit=None, north_vector=None, fontsize=15):
         r"""Creates an off axis slice plot from a parameter file
 
         Given a pf object, a normal vector defining a slicing plane, and
@@ -1158,7 +1167,8 @@
             A vector defining the 'up' direction in the plot.  This
             option sets the orientation of the slicing plane.  If not
             set, an arbitrary grid-aligned north-vector is chosen.
-
+        fontsize : integer
+             The size of the fonts for the axis, colorbar, and tick labels.
         """
         (bounds,center_rot) = GetOffAxisBoundsAndCenter(normal,center,width,pf)
         cutting = pf.h.cutting(normal,center,fields=fields,north_vector=north_vector)
@@ -1197,7 +1207,7 @@
     def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
                  depth=(1,'unitary'), axes_unit=None, weight_field=None, 
                  max_level=None, north_vector=None, volume=None, no_ghost=False, 
-                 le=None, re=None, interpolated=False):
+                 le=None, re=None, interpolated=False, fontsize=15):
         r"""Creates an off axis projection plot from a parameter file
 
         Given a pf object, a normal vector to project along, and



https://bitbucket.org/yt_analysis/yt-3.0/changeset/27a4ab8ac1cc/
changeset:   27a4ab8ac1cc
branch:      yt
user:        samskillman
date:        2012-11-09 23:27:20
summary:     Updating framework.py to allow for local storage.  Now controlled by --local-store=[False/True]
affected #:  1 file

diff -r 578f64273856f2e6dba89a1c393d375875347313 -r 27a4ab8ac1ccbac0fe5053128a490e4a75f15bd9 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -81,31 +81,36 @@
             self.result_storage = defaultdict(dict)
         if options.compare_name == "SKIP":
             options.compare_name = None
-        if options.compare_name is not None:
-            # Now we grab from our S3 store
-            if options.compare_name == "latest":
-                options.compare_name = _latest
+        elif options.compare_name == "latest":
+            options.compare_name = _latest
+
+        # We only either store or test.
         if options.store_local_results:
             AnswerTestingTest.reference_storage = \
-                self.storage = AnswerTestLocalStorage(options.compare_name)
+                self.storage = \
+                    AnswerTestLocalStorage("%s/%s" % \
+                        (os.path.realpath(options.output_dir), options.compare_name), not options.store_results)
         else:
             AnswerTestingTest.reference_storage = \
-                self.storage = AnswerTestCloudStorage(options.compare_name)
+                self.storage = AnswerTestCloudStorage(options.compare_name, not options.store_results)
 
+        self.storage_compare_name = options.compare_name
+        self.output_dir = options.output_dir
         self.answer_name = options.this_name
         self.store_results = options.store_results
         self.store_local_results = options.store_local_results
         global run_big_data
         run_big_data = options.big_data
 
-    def finalize(self, result):
+    def finalize(self):
         if self.store_results is False: return
-        self.storage.dump(self.result_storage, result)        
+        self.storage.dump(self.result_storage)        
 
 class AnswerTestStorage(object):
-    def __init__(self, reference_name):
+    def __init__(self, reference_name, read=True):
         self.reference_name = reference_name
         self.cache = {}
+        self.read = read
     def dump(self, result_storage, result):
         pass
     def get(self, pf_name, default=None):
@@ -113,6 +118,7 @@
 
 class AnswerTestCloudStorage(AnswerTestStorage):
     def get(self, pf_name, default = None):
+        if not self.read: return default
         if pf_name in self.cache: return self.cache[pf_name]
         url = _url_path % (self.reference_name, pf_name)
         try:
@@ -127,7 +133,8 @@
         self.cache[pf_name] = rv
         return rv
 
-    def dump(self, result_storage, result):
+    def dump(self, result_storage):
+        if self.read: return
         # This is where we dump our result storage up to Amazon, if we are able
         # to.
         import boto
@@ -144,27 +151,24 @@
             k.set_acl("public-read")
 
 class AnswerTestLocalStorage(AnswerTestStorage):
-    def __init__(self, reference_name):
-        self.reference_name = reference_name
-        self.cache = {}
-
-    def dump(self, result_storage, result):
+    def dump(self, result_storage):
+        if self.read: return 
         # Store data using shelve
-        if self.store_results is False: return
         ds = shelve.open(self.reference_name, protocol=-1)
         for pf_name in result_storage:
-            answer_name = "%s_%s" % (self.reference_name, pf_name)
+            answer_name = "%s" % pf_name
             if name in ds:
                 mylog.info("Overwriting %s", answer_name)
             ds[answer_name] = result_storage[pf_name]
         ds.close()
 
     def get(self, pf_name, default=None):
+        if not self.read: return default
         # Read data using shelve
-        answer_name = "%s_%s" % (self.reference_name, pf_name)
+        answer_name = "%s" % pf_name
         ds = shelve.open(self.reference_name, protocol=-1)
         try:
-            result = ds[name]
+            result = ds[answer_name]
         except KeyError:
             result = default
         ds.close()
@@ -215,7 +219,8 @@
 
     def __call__(self):
         nv = self.run()
-        if self.reference_storage is not None:
+        if self.reference_storage is not None and \
+           self.reference_storage.read:
             dd = self.reference_storage.get(self.storage_name)
             if dd is None: raise YTNoOldAnswer(self.storage_name)
             ov = dd[self.description]
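
AnswerTestLocalStorage is a thin wrapper around the standard-library
shelve module: dump() writes one entry per parameter-file name, and get()
reads it back, falling back to the default on a missing key. A minimal
round-trip sketch, independent of the framework (file and key names are
illustrative):

    import shelve

    ds = shelve.open("local_answers", protocol=-1)
    ds["moving7_0010"] = {"field_values": [1.0, 2.0, 3.0]}
    ds.close()

    ds = shelve.open("local_answers", protocol=-1)
    try:
        result = ds["moving7_0010"]
    except KeyError:
        result = None
    ds.close()
    print(result)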



https://bitbucket.org/yt_analysis/yt-3.0/changeset/4ea60322c0c4/
changeset:   4ea60322c0c4
branch:      yt
user:        samskillman
date:        2012-11-09 23:47:06
summary:     Quick fix.  Now both local and cloud work.
affected #:  1 file

diff -r 27a4ab8ac1ccbac0fe5053128a490e4a75f15bd9 -r 4ea60322c0c434459a522973f9f2be732ed66555 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -85,7 +85,7 @@
             options.compare_name = _latest
 
         # We only either store or test.
-        if options.store_local_results:
+        if options.store_local_results == 'True':
             AnswerTestingTest.reference_storage = \
                 self.storage = \
                     AnswerTestLocalStorage("%s/%s" % \
@@ -219,8 +219,7 @@
 
     def __call__(self):
         nv = self.run()
-        if self.reference_storage is not None and \
-           self.reference_storage.read:
+        if self.reference_storage is not None and self.reference_storage.read:
             dd = self.reference_storage.get(self.storage_name)
             if dd is None: raise YTNoOldAnswer(self.storage_name)
             ov = dd[self.description]
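
The == 'True' comparison reflects how optparse delivers this flag:
--local-store is declared without a type or action, so its value arrives
as a string rather than a bool. A minimal sketch of that behavior:

    from optparse import OptionParser

    parser = OptionParser()
    parser.add_option("--local-store", dest="store_local_results",
                      default=False)
    opts, args = parser.parse_args(["--local-store=True"])
    print(repr(opts.store_local_results))   # 'True', a string, not a bool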



https://bitbucket.org/yt_analysis/yt-3.0/changeset/4d55b13a56ab/
changeset:   4d55b13a56ab
branch:      yt
user:        MatthewTurk
date:        2012-11-09 23:48:29
summary:     Merged in samskillman/yt (pull request #332)
affected #:  1 file

diff -r 5f6163709ed86827de0b2db599113dba8f1b1e96 -r 4d55b13a56ab0a70c0c5922f4e392e1edb9546ea yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -36,6 +36,7 @@
 from yt.mods import *
 from yt.data_objects.static_output import StaticOutput
 import cPickle
+import shelve
 
 from yt.utilities.logger import disable_stream_logging
 from yt.utilities.command_line import get_yt_version
@@ -61,6 +62,8 @@
             help="The name we'll call this set of tests")
         parser.add_option("--answer-store", dest="store_results",
             default=False, action="store_true")
+        parser.add_option("--local-store", dest="store_local_results",
+            default=False)
 
     def configure(self, options, conf):
         super(AnswerTesting, self).configure(options, conf)
@@ -78,40 +81,44 @@
             self.result_storage = defaultdict(dict)
         if options.compare_name == "SKIP":
             options.compare_name = None
-        if options.compare_name is not None:
-            # Now we grab from our S3 store
-            if options.compare_name == "latest":
-                options.compare_name = _latest
+        elif options.compare_name == "latest":
+            options.compare_name = _latest
+
+        # We only either store or test.
+        if options.store_local_results == 'True':
             AnswerTestingTest.reference_storage = \
-                AnswerTestOpener(options.compare_name)
+                self.storage = \
+                    AnswerTestLocalStorage("%s/%s" % \
+                        (os.path.realpath(options.output_dir), options.compare_name), not options.store_results)
+        else:
+            AnswerTestingTest.reference_storage = \
+                self.storage = AnswerTestCloudStorage(options.compare_name, not options.store_results)
+
+        self.storage_compare_name = options.compare_name
+        self.output_dir = options.output_dir
         self.answer_name = options.this_name
         self.store_results = options.store_results
+        self.store_local_results = options.store_local_results
         global run_big_data
         run_big_data = options.big_data
 
-    def finalize(self, result):
-        # This is where we dump our result storage up to Amazon, if we are able
-        # to.
+    def finalize(self):
         if self.store_results is False: return
-        import boto
-        from boto.s3.key import Key
-        c = boto.connect_s3()
-        bucket = c.get_bucket("yt-answer-tests")
-        for pf_name in self.result_storage:
-            rs = cPickle.dumps(self.result_storage[pf_name])
-            tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name)) 
-            if tk is not None: tk.delete()
-            k = Key(bucket)
-            k.key = "%s_%s" % (self.answer_name, pf_name)
-            k.set_contents_from_string(rs)
-            k.set_acl("public-read")
+        self.storage.dump(self.result_storage)        
 
-class AnswerTestOpener(object):
-    def __init__(self, reference_name):
+class AnswerTestStorage(object):
+    def __init__(self, reference_name, read=True):
         self.reference_name = reference_name
         self.cache = {}
+        self.read = read
+    def dump(self, result_storage, result):
+        pass
+    def get(self, pf_name, default=None):
+        pass
 
+class AnswerTestCloudStorage(AnswerTestStorage):
     def get(self, pf_name, default = None):
+        if not self.read: return default
         if pf_name in self.cache: return self.cache[pf_name]
         url = _url_path % (self.reference_name, pf_name)
         try:
@@ -126,6 +133,47 @@
         self.cache[pf_name] = rv
         return rv
 
+    def dump(self, result_storage):
+        if self.read: return
+        # This is where we dump our result storage up to Amazon, if we are able
+        # to.
+        import boto
+        from boto.s3.key import Key
+        c = boto.connect_s3()
+        bucket = c.get_bucket("yt-answer-tests")
+        for pf_name in result_storage:
+            rs = cPickle.dumps(result_storage[pf_name])
+            tk = bucket.get_key("%s_%s" % (self.reference_name, pf_name)) 
+            if tk is not None: tk.delete()
+            k = Key(bucket)
+            k.key = "%s_%s" % (self.reference_name, pf_name)
+            k.set_contents_from_string(rs)
+            k.set_acl("public-read")
+
+class AnswerTestLocalStorage(AnswerTestStorage):
+    def dump(self, result_storage):
+        if self.read: return 
+        # Store data using shelve
+        ds = shelve.open(self.reference_name, protocol=-1)
+        for pf_name in result_storage:
+            answer_name = "%s" % pf_name
+            if name in ds:
+                mylog.info("Overwriting %s", answer_name)
+            ds[answer_name] = result_storage[pf_name]
+        ds.close()
+
+    def get(self, pf_name, default=None):
+        if not self.read: return default
+        # Read data using shelve
+        answer_name = "%s" % pf_name
+        ds = shelve.open(self.reference_name, protocol=-1)
+        try:
+            result = ds[answer_name]
+        except KeyError:
+            result = default
+        ds.close()
+        return result
+
 @contextlib.contextmanager
 def temp_cwd(cwd):
     oldcwd = os.getcwd()
@@ -164,15 +212,16 @@
 
 class AnswerTestingTest(object):
     reference_storage = None
+    result_storage = None
     prefix = ""
     def __init__(self, pf_fn):
         self.pf = data_dir_load(pf_fn)
 
     def __call__(self):
         nv = self.run()
-        if self.reference_storage is not None:
+        if self.reference_storage is not None and self.reference_storage.read:
             dd = self.reference_storage.get(self.storage_name)
-            if dd is None: raise YTNoOldAnswer()
+            if dd is None: raise YTNoOldAnswer(self.storage_name)
             ov = dd[self.description]
             self.compare(nv, ov)
         else:



https://bitbucket.org/yt_analysis/yt-3.0/changeset/4a1149df9e93/
changeset:   4a1149df9e93
branch:      yt
user:        samskillman
date:        2012-11-09 23:56:24
summary:     Removes unused variables and resolves the current build error.
affected #:  1 file

diff -r 4ea60322c0c434459a522973f9f2be732ed66555 -r 4a1149df9e9308d2fb0cdaf7d3c46dd069d62e69 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -94,9 +94,6 @@
             AnswerTestingTest.reference_storage = \
                 self.storage = AnswerTestCloudStorage(options.compare_name, not options.store_results)
 
-        self.storage_compare_name = options.compare_name
-        self.output_dir = options.output_dir
-        self.answer_name = options.this_name
         self.store_results = options.store_results
         self.store_local_results = options.store_local_results
         global run_big_data



https://bitbucket.org/yt_analysis/yt-3.0/changeset/76614f8362da/
changeset:   76614f8362da
branch:      yt
user:        samskillman
date:        2012-11-09 23:57:08
summary:     Merging.
affected #:  2 files

diff -r 4a1149df9e9308d2fb0cdaf7d3c46dd069d62e69 -r 76614f8362daaa33f2ca07c370a0fdb554dd91f4 .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5152,3 +5152,6 @@
 0000000000000000000000000000000000000000 svn.993
 fff7118f00e25731ccf37cba3082b8fcb73cf90e svn.371
 0000000000000000000000000000000000000000 svn.371
+f15825659f5af3ce64aaad30062aff3603cbfb66 hop callback
+0000000000000000000000000000000000000000 hop callback
+0000000000000000000000000000000000000000 hop callback


diff -r 4a1149df9e9308d2fb0cdaf7d3c46dd069d62e69 -r 76614f8362daaa33f2ca07c370a0fdb554dd91f4 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -279,12 +279,12 @@
     ranks = None
     available_ranks = None
     tasks = None
-    workgroups = []
     def __init__(self):
         self.comm = communication_system.communicators[-1]
         self.size = self.comm.size
         self.ranks = range(self.size)
         self.available_ranks = range(self.size)
+        self.workgroups = []
     
     def add_workgroup(self, size=None, ranks=None, name=None):
         if size is None:
@@ -296,7 +296,7 @@
             ranks = [self.available_ranks.pop(0) for i in range(size)]
         # Default name to the workgroup number.
         if name is None: 
-            name = string(len(workgroups))
+            name = string(len(self.workgroups))
         group = self.comm.comm.Get_group().Incl(ranks)
         new_comm = self.comm.comm.Create(group)
         if self.comm.rank in ranks:
@@ -304,16 +304,20 @@
         self.workgroups.append(Workgroup(len(ranks), ranks, new_comm, name))
     
     def free_workgroup(self, workgroup):
+        # If you want to actually delete the workgroup you will need to
+        # pop it out of the self.workgroups list so you don't have references
+        # that are left dangling, e.g. see free_all() below.
         for i in workgroup.ranks:
             if self.comm.rank == i:
                 communication_system.communicators.pop()
             self.available_ranks.append(i) 
-        del workgroup
         self.available_ranks.sort()
 
     def free_all(self):
         for wg in self.workgroups:
             self.free_workgroup(wg)
+        for i in range(len(self.workgroups)):
+            self.workgroups.pop(0)
 
     @classmethod
     def from_sizes(cls, sizes):
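
The free_workgroup change matters because del workgroup only unbound the
local name; the Workgroup object itself stayed referenced from
self.workgroups. A small sketch of the difference:

    class Workgroup(object):
        def __init__(self, name):
            self.name = name

    workgroups = [Workgroup("wg0")]
    wg = workgroups[0]
    del wg                      # unbinds the local name only
    print(len(workgroups))      # 1: the group is still referenced

    # The fix pops entries out of the list instead, as free_all() now does:
    for _ in range(len(workgroups)):
        workgroups.pop(0)
    print(len(workgroups))      # 0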



https://bitbucket.org/yt_analysis/yt-3.0/changeset/f5f2dee81c07/
changeset:   f5f2dee81c07
branch:      yt
user:        samskillman
date:        2012-11-10 00:09:43
summary:     yt needs a result to use.
affected #:  1 file

diff -r 76614f8362daaa33f2ca07c370a0fdb554dd91f4 -r f5f2dee81c071efd5520ee974faf62f1dcb7ca02 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -99,7 +99,7 @@
         global run_big_data
         run_big_data = options.big_data
 
-    def finalize(self):
+    def finalize(self, result=None):
         if self.store_results is False: return
         self.storage.dump(self.result_storage)        
 



https://bitbucket.org/yt_analysis/yt-3.0/changeset/9a7d90d4a9fd/
changeset:   9a7d90d4a9fd
branch:      yt
user:        chummels
date:        2012-11-09 21:01:18
summary:     Adding nose install using pip to yt installer, for compatibility with nose testing.
affected #:  1 file

diff -r 3c88ed17da43b06f6f4c947519f235fc8d508924 -r 9a7d90d4a9fdb8d66c848ec33a857fc366841e69 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -715,6 +715,12 @@
     ${DEST_DIR}/bin/pip install readline 1>> ${LOG_FILE}
 fi
 
+if !(${DEST_DIR}/bin/python2.7 -c "import nose" >> ${LOG_FILE})
+then
+    echo "Installing pure-python nose"
+    ${DEST_DIR}/bin/pip install nose 1>> ${LOG_FILE}
+fi
+
 if [ $INST_ENZO -eq 1 ]
 then
     echo "Cloning a copy of Enzo."



https://bitbucket.org/yt_analysis/yt-3.0/changeset/d2e7d23fe406/
changeset:   d2e7d23fe406
branch:      yt
user:        chummels
date:        2012-11-09 22:32:19
summary:     Updating install_script to new versions of several pieces of software yt depends on: Cython 0.17.1, HDF5 1.8.9, h5py 2.1, mercurial 2.4, matplotlib 1.2, ipython 0.13.1. Also adding new packages: nose, hglib, and sympy.
affected #:  1 file

diff -r 9a7d90d4a9fdb8d66c848ec33a857fc366841e69 -r d2e7d23fe406f78eaf3b7319a3f9ac3e20aa8292 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -245,7 +245,7 @@
         echo "******************************************"
         echo "**                                      **"
         echo "**    Your CFLAGS is not empty.         **"
-        echo "**    This can beak h5py compilation.   **"
+        echo "**    This can break h5py compilation.  **"
         echo "**                                      **"
         echo "******************************************"
         echo "******************************************"
@@ -411,28 +411,31 @@
 
 # Now we dump all our SHA512 files out.
 
-echo '2c1933ab31246b4f4eba049d3288156e0a72f1730604e3ed7357849967cdd329e4647cf236c9442ecfb06d0aff03e6fc892a7ba2a5c1cf5c011b7ab9c619acec  Cython-0.16.tar.gz' > Cython-0.16.tar.gz.sha512
+echo 'eda1b8090e5e21e7e039ef4dd03de186a7b416df9d5a4e4422abeeb4d51383b9a6858e1ac4902d8e5010f661b295bbb2452c43c8738be668379b4eb4835d0f61  Cython-0.17.1.tar.gz' > Cython-0.17.1.tar.gz.sha512
 echo '44eea803870a66ff0bab08d13a8b3388b5578ebc1c807d1d9dca0a93e6371e91b15d02917a00b3b20dc67abb5a21dabaf9b6e9257a561f85eeff2147ac73b478  PyX-0.11.1.tar.gz' > PyX-0.11.1.tar.gz.sha512
 echo 'b981f8464575bb24c297631c87a3b9172312804a0fc14ce1fa7cb41ce2b0d2fd383cd1c816d6e10c36467d18bf9492d6faf557c81c04ff3b22debfa93f30ad0b  Python-2.7.3.tgz' > Python-2.7.3.tgz.sha512
 echo 'c017d3d59dd324ac91af0edc178c76b60a5f90fbb775cf843e39062f95bd846238f2c53705f8890ed3f34bc0e6e75671a73d13875eb0287d6201cb45f0a2d338  bzip2-1.0.5.tar.gz' > bzip2-1.0.5.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
 echo 'b519218f93946400326e9b656669269ecb3e5232b944e18fbc3eadc4fe2b56244d68aae56d6f69042b4c87c58c881ee2aaa279561ea0f0f48d5842155f4de9de  freetype-2.4.4.tar.gz' > freetype-2.4.4.tar.gz.sha512
-echo '1531789e0a77d4829796d18552a4de7aecae7e8b63763a7951a8091921995800740fe03e72a7dbd496a5590828131c5f046ddead695e5cba79343b8c205148d1  h5py-2.0.1.tar.gz' > h5py-2.0.1.tar.gz.sha512
-echo '9644896e4a84665ad22f87eb885cbd4a0c60a5c30085d5dd5dba5f3c148dbee626f0cb01e59a7995a84245448a3f1e9ba98687d3f10250e2ee763074ed8ddc0e  hdf5-1.8.7.tar.gz' > hdf5-1.8.7.tar.gz.sha512
+echo 'b3290c498191684781ca5286ab454eb1bd045e8d894f5b86fb86beb88f174e22ac3ab008fb02d6562051d9fa6a9593920cab433223f6d5473999913223b8e183  h5py-2.1.0.tar.gz' > h5py-2.1.0.tar.gz.sha512
+echo 'c68a425bacaa7441037910b9166f25b89e1387776a7749a5350793f89b1690350df5f018060c31d03686e7c3ed2aa848bd2b945c96350dc3b6322e087934783a  hdf5-1.8.9.tar.gz' > hdf5-1.8.9.tar.gz.sha512
 echo 'dbefad00fa34f4f21dca0f1e92e95bd55f1f4478fa0095dcf015b4d06f0c823ff11755cd777e507efaf1c9098b74af18f613ec9000e5c3a5cc1c7554fb5aefb8  libpng-1.5.12.tar.gz' > libpng-1.5.12.tar.gz.sha512
-echo 'f5ab95c29ef6958096970265a6079f0eb8c43a500924346c4a6c6eb89d9110eeeb6c34a53715e71240e82ded2b76a7b8d5a9b05a07baa000b2926718264ad8ff  matplotlib-1.1.0.tar.gz' > matplotlib-1.1.0.tar.gz.sha512
-echo 'ec7416729f99f5eef6700507e740552e771d6dd8863f757311538d7d67a0eecd3426381bd42a7ddbf0771bdde8bba5cb943f60031ae3567d6a3dcac738facda8  mercurial-2.2.2.tar.gz' > mercurial-2.2.2.tar.gz.sha512
+echo '5b1a0fb52dcb21ca5f0ab71c8a49550e1e8cf633552ec6598dc43f0b32c03422bf5af65b30118c163231ecdddfd40846909336f16da318959106076e80a3fad0  matplotlib-1.2.0.tar.gz' > matplotlib-1.2.0.tar.gz.sha512
+echo '52d1127de2208aaae693d16fef10ffc9b8663081bece83b7597d65706e9568af3b9e56bd211878774e1ebed92e21365ee9c49602a0ff5e48f89f12244d79c161  mercurial-2.4.tar.gz' > mercurial-2.4.tar.gz.sha512
 echo 'de3dd37f753614055dcfed910e9886e03688b8078492df3da94b1ec37be796030be93291cba09e8212fffd3e0a63b086902c3c25a996cf1439e15c5b16e014d9  numpy-1.6.1.tar.gz' > numpy-1.6.1.tar.gz.sha512
 echo '5ad681f99e75849a5ca6f439c7a19bb51abc73d121b50f4f8e4c0da42891950f30407f761a53f0fe51b370b1dbd4c4f5a480557cb2444c8c7c7d5412b328a474  sqlite-autoconf-3070500.tar.gz' > sqlite-autoconf-3070500.tar.gz.sha512
 echo 'edae735960279d92acf58e1f4095c6392a7c2059b8f1d2c46648fc608a0fb06b392db2d073f4973f5762c034ea66596e769b95b3d26ad963a086b9b2d09825f2  zlib-1.2.3.tar.bz2' > zlib-1.2.3.tar.bz2.sha512
-echo '42021737c93cea513116e6051cff9b803e3f25d6019c74370b42f4c91d1af73e94ac2b7ace85b7565fa3d45b79231079bd48a242531beeafa33c36d7139ce838  ipython-0.13.tar.gz' > ipython-0.13.tar.gz.sha512
+echo '05ac335727a2c3036f31a2506fdd2615aa436bfbe2f81799fe6c51bffe2591ad6a8427f3b25c34e7e709fb4e7607a0589dc7a22185c1f9b894e90de6711a88aa  ipython-0.13.1.tar.gz' > ipython-0.13.1.tar.gz.sha512
 echo 'fb3cf421b2dc48c31956b3e3ee4ab6ebc743deec3bf626c2238a1996c8c51be87260bd6aa662793a1f0c34dcda9b3146763777bb162dfad6fec4ca7acc403b2e  zeromq-2.2.0.tar.gz' > zeromq-2.2.0.tar.gz.sha512
 echo 'd761b492352841cdc125d9f0c99ee6d6c435812472ea234728b7f0fb4ad1048e1eec9b399df2081fbc926566f333f7780fedd0ce23255a6633fe5c60ed15a6af  pyzmq-2.1.11.tar.gz' > pyzmq-2.1.11.tar.gz.sha512
 echo '57fa5e57dfb98154a42d2d477f29401c2260ae7ad3a8128a4098b42ee3b35c54367b1a3254bc76b9b3b14b4aab7c3e1135858f68abc5636daedf2f01f9b8a3cf  tornado-2.2.tar.gz' > tornado-2.2.tar.gz.sha512
 echo '1332e3d5465ca249c357314cf15d2a4e5e83a941841021b8f6a17a107dce268a7a082838ade5e8db944ecde6bfb111211ab218aa414ee90aafbb81f1491b3b93  Forthon-0.8.10.tar.gz' > Forthon-0.8.10.tar.gz.sha512
+echo 'c13116c1f0547000cc565e15774687b9e884f8b74fb62a84e578408a868a84961704839065ae4f21b662e87f2aaedf6ea424ea58dfa9d3d73c06281f806d15dd  nose-1.2.1.tar.gz' > nose-1.2.1.tar.gz.sha512
+echo '73de2c99406a38f85273931597525cec4ebef55b93712adca3b0bfea8ca3fc99446e5d6495817e9ad55cf4d48feb7fb49734675c4cc8938db8d4a5225d30eca7  python-hglib-0.2.tar.gz' > python-hglib-0.2.tar.gz.sha512
+echo '58cb029385557667465f6631c4a93aa3be10345bf56e3d6cdd6d1846c95655cf3a377c38cbb75ceb761000efe69fac256a77db4788369d6c205549af6fd18ccf  sympy-0.7.2-py3.3.tar.gz' > sympy-0.7.2-py3.3.tar.gz.sha512
 
 # Individual processes
-[ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.7.tar.gz
+[ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.9.tar.gz
 [ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.3.tar.bz2 
 [ $INST_BZLIB -eq 1 ] && get_ytproject bzip2-1.0.5.tar.gz
 [ $INST_PNG -eq 1 ] && get_ytproject libpng-1.5.12.tar.gz
@@ -444,14 +447,16 @@
 [ $INST_0MQ -eq 1 ] && get_ytproject tornado-2.2.tar.gz
 get_ytproject Python-2.7.3.tgz
 get_ytproject numpy-1.6.1.tar.gz
-get_ytproject matplotlib-1.1.0.tar.gz
-get_ytproject mercurial-2.2.2.tar.gz
-get_ytproject ipython-0.13.tar.gz
-get_ytproject h5py-2.0.1.tar.gz
-get_ytproject Cython-0.16.tar.gz
+get_ytproject matplotlib-1.2.0.tar.gz
+get_ytproject mercurial-2.4.tar.gz
+get_ytproject ipython-0.13.1.tar.gz
+get_ytproject h5py-2.1.0.tar.gz
+get_ytproject Cython-0.17.tar.gz
 get_ytproject reason-js-20120623.zip
 get_ytproject Forthon-0.8.10.tar.gz
-
+get_ytproject nose-1.2.1.tar.gz 
+get_ytproject python-hglib-0.2.tar.gz
+get_ytproject sympy-0.7.2-py3.3.tar.gz
 if [ $INST_BZLIB -eq 1 ]
 then
     if [ ! -e bzip2-1.0.5/done ]
@@ -535,11 +540,11 @@
 
 if [ -z "$HDF5_DIR" ]
 then
-    if [ ! -e hdf5-1.8.7/done ]
+    if [ ! -e hdf5-1.8.9/done ]
     then
-        [ ! -e hdf5-1.8.7 ] && tar xfz hdf5-1.8.7.tar.gz
+        [ ! -e hdf5-1.8.9 ] && tar xfz hdf5-1.8.9.tar.gz
         echo "Installing HDF5"
-        cd hdf5-1.8.7
+        cd hdf5-1.8.9
         ( ./configure --prefix=${DEST_DIR}/ --enable-shared 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -587,7 +592,7 @@
 if [ $INST_HG -eq 1 ]
 then
     echo "Installing Mercurial."
-    do_setup_py mercurial-2.2.2
+    do_setup_py mercurial-2.4
     export HG_EXEC=${DEST_DIR}/bin/hg
 else
     # We assume that hg can be found in the path.
@@ -655,10 +660,10 @@
     echo "Setting CFLAGS ${CFLAGS}"
 fi
 # Now we set up the basedir for matplotlib:
-mkdir -p ${DEST_DIR}/src/matplotlib-1.1.0
-echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.1.0/setup.cfg
-echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.1.0/setup.cfg
-do_setup_py matplotlib-1.1.0
+mkdir -p ${DEST_DIR}/src/matplotlib-1.2.0
+echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.2.0/setup.cfg
+echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.2.0/setup.cfg
+do_setup_py matplotlib-1.2.0
 if [ -n "${OLD_LDFLAGS}" ]
 then
     export LDFLAG=${OLD_LDFLAGS}
@@ -685,10 +690,13 @@
     do_setup_py tornado-2.2
 fi
 
-do_setup_py ipython-0.13
-do_setup_py h5py-2.0.1
-do_setup_py Cython-0.16
+do_setup_py ipython-0.13.1
+do_setup_py h5py-2.1.0
+do_setup_py Cython-0.17
 do_setup_py Forthon-0.8.10
+do_setup_py nose-1.2.1
+do_setup_py python-hglib-0.2
+do_setup_py sympy-0.7.2-py3.3
 [ $INST_PYX -eq 1 ] && do_setup_py PyX-0.11.1
 
 echo "Doing yt update, wiping local changes and updating to branch ${BRANCH}"
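
Each echo above writes a <tarball>.sha512 file that the installer checks
against the downloaded archive before building. A Python sketch of the
same verification; the final check is commented out because it assumes
the files are present:

    import hashlib

    def sha512_of(path):
        # Stream the file so large tarballs do not have to fit in memory.
        h = hashlib.sha512()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                h.update(chunk)
        return h.hexdigest()

    # expected = open("Cython-0.17.1.tar.gz.sha512").read().split()[0]
    # assert sha512_of("Cython-0.17.1.tar.gz") == expected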



https://bitbucket.org/yt_analysis/yt-3.0/changeset/d4c2e138d178/
changeset:   d4c2e138d178
branch:      yt
user:        chummels
date:        2012-11-09 22:38:03
summary:     Getting rid of extraneous pip install nose command in installer script.
affected #:  1 file

diff -r d2e7d23fe406f78eaf3b7319a3f9ac3e20aa8292 -r d4c2e138d178be1f0756654edb7e82aa638b4b35 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -723,12 +723,6 @@
     ${DEST_DIR}/bin/pip install readline 1>> ${LOG_FILE}
 fi
 
-if !(${DEST_DIR}/bin/python2.7 -c "import nose" >> ${LOG_FILE})
-then
-    echo "Installing pure-python nose"
-    ${DEST_DIR}/bin/pip install nose 1>> ${LOG_FILE}
-fi
-
 if [ $INST_ENZO -eq 1 ]
 then
     echo "Cloning a copy of Enzo."



https://bitbucket.org/yt_analysis/yt-3.0/changeset/3e5b4b20d47f/
changeset:   3e5b4b20d47f
branch:      yt
user:        chummels
date:        2012-11-09 22:42:26
summary:     Correcting a bug in install_script.
affected #:  1 file

diff -r d4c2e138d178be1f0756654edb7e82aa638b4b35 -r 3e5b4b20d47fa97ecd7a2a7aa236db9c91d1141a doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -451,7 +451,7 @@
 get_ytproject mercurial-2.4.tar.gz
 get_ytproject ipython-0.13.1.tar.gz
 get_ytproject h5py-2.1.0.tar.gz
-get_ytproject Cython-0.17.tar.gz
+get_ytproject Cython-0.17.1.tar.gz
 get_ytproject reason-js-20120623.zip
 get_ytproject Forthon-0.8.10.tar.gz
 get_ytproject nose-1.2.1.tar.gz 
@@ -692,7 +692,7 @@
 
 do_setup_py ipython-0.13.1
 do_setup_py h5py-2.1.0
-do_setup_py Cython-0.17
+do_setup_py Cython-0.17.1
 do_setup_py Forthon-0.8.10
 do_setup_py nose-1.2.1
 do_setup_py python-hglib-0.2



https://bitbucket.org/yt_analysis/yt-3.0/changeset/0a9108f26b4f/
changeset:   0a9108f26b4f
branch:      yt
user:        chummels
date:        2012-11-09 23:25:30
summary:     Fixing a bug in install_script.sh
affected #:  1 file

diff -r 3e5b4b20d47fa97ecd7a2a7aa236db9c91d1141a -r 0a9108f26b4fd0c0e523fec9f0975c7cc61f4929 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -432,7 +432,7 @@
 echo '1332e3d5465ca249c357314cf15d2a4e5e83a941841021b8f6a17a107dce268a7a082838ade5e8db944ecde6bfb111211ab218aa414ee90aafbb81f1491b3b93  Forthon-0.8.10.tar.gz' > Forthon-0.8.10.tar.gz.sha512
 echo 'c13116c1f0547000cc565e15774687b9e884f8b74fb62a84e578408a868a84961704839065ae4f21b662e87f2aaedf6ea424ea58dfa9d3d73c06281f806d15dd  nose-1.2.1.tar.gz' > nose-1.2.1.tar.gz.sha512
 echo '73de2c99406a38f85273931597525cec4ebef55b93712adca3b0bfea8ca3fc99446e5d6495817e9ad55cf4d48feb7fb49734675c4cc8938db8d4a5225d30eca7  python-hglib-0.2.tar.gz' > python-hglib-0.2.tar.gz.sha512
-echo '58cb029385557667465f6631c4a93aa3be10345bf56e3d6cdd6d1846c95655cf3a377c38cbb75ceb761000efe69fac256a77db4788369d6c205549af6fd18ccf  sympy-0.7.2-py3.3.tar.gz' > sympy-0.7.2-py3.3.tar.gz.sha512
+echo 'ffc602eb346717286b3d0a6770c60b03b578b3cf70ebd12f9e8b1c8c39cdb12ef219ddaa041d7929351a6b02dbb8caf1821b5452d95aae95034cbf4bc9904a7a  sympy-0.7.2.tar.gz' > sympy-0.7.2.tar.gz.sha512
 
 # Individual processes
 [ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.9.tar.gz
@@ -456,7 +456,7 @@
 get_ytproject Forthon-0.8.10.tar.gz
 get_ytproject nose-1.2.1.tar.gz 
 get_ytproject python-hglib-0.2.tar.gz
-get_ytproject sympy-0.7.2-py3.3.tar.gz
+get_ytproject sympy-0.7.2.tar.gz
 if [ $INST_BZLIB -eq 1 ]
 then
     if [ ! -e bzip2-1.0.5/done ]
@@ -696,7 +696,7 @@
 do_setup_py Forthon-0.8.10
 do_setup_py nose-1.2.1
 do_setup_py python-hglib-0.2
-do_setup_py sympy-0.7.2-py3.3
+do_setup_py sympy-0.7.2
 [ $INST_PYX -eq 1 ] && do_setup_py PyX-0.11.1
 
 echo "Doing yt update, wiping local changes and updating to branch ${BRANCH}"



https://bitbucket.org/yt_analysis/yt-3.0/changeset/c42bdbdfc464/
changeset:   c42bdbdfc464
branch:      yt
user:        chummels
date:        2012-11-10 00:45:09
summary:     Fixed a bug with pyparsing where it got renamed in matplotlib v1.2.0.
affected #:  1 file

diff -r 0a9108f26b4fd0c0e523fec9f0975c7cc61f4929 -r c42bdbdfc464a30eb7d93c27aeb0036915ac12b4 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -27,10 +27,18 @@
 import base64
 import matplotlib.figure
 from matplotlib.mathtext import MathTextParser
+
+# Some magic for dealing with pyparsing being included or not
+# included in matplotlib (not in gentoo, yes in everything else)
+# Also accounting for the fact that in 1.2.0, pyparsing got renamed.
 try:
-    from matplotlib.pyparsing import ParseFatalException
+    if version.LooseVersion(matplotlib.__version__) < version.LooseVersion("1.2.0"):
+        from matplotlib.pyparsing import ParseFatalException
+    else:
+        from matplotlib.pyparsing_py2 import ParseFatalException
 except ImportError:
     from pyparsing import ParseFatalException
+
 import cStringIO
 import types
 import __builtin__
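
LooseVersion is used for the comparison because plain string ordering
mishandles multi-digit version components. A quick illustration:

    from distutils import version

    print("1.10" > "1.2")   # False: character-by-character comparison
    print(version.LooseVersion("1.10") > version.LooseVersion("1.2"))
    # True: components are compared numerically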



https://bitbucket.org/yt_analysis/yt-3.0/changeset/1d22cb72872f/
changeset:   1d22cb72872f
branch:      yt
user:        chummels
date:        2012-11-10 00:46:21
summary:     Oops, forgot an additional import.
affected #:  1 file

diff -r c42bdbdfc464a30eb7d93c27aeb0036915ac12b4 -r 1d22cb72872f253e035a9f4fc9fb4eb63025f073 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -27,6 +27,8 @@
 import base64
 import matplotlib.figure
 from matplotlib.mathtext import MathTextParser
+from distutils import version
+import matplotlib
 
 # Some magic for dealing with pyparsing being included or not
 # included in matplotlib (not in gentoo, yes in everything else)



https://bitbucket.org/yt_analysis/yt-3.0/changeset/6812eb0787cf/
changeset:   6812eb0787cf
branch:      yt
user:        MatthewTurk
date:        2012-11-10 05:24:35
summary:     Merged in chummels/yt (pull request #333)
affected #:  2 files

diff -r f5f2dee81c071efd5520ee974faf62f1dcb7ca02 -r 6812eb0787cfd146b71702c8d593fda40c507f4a doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -245,7 +245,7 @@
         echo "******************************************"
         echo "**                                      **"
         echo "**    Your CFLAGS is not empty.         **"
-        echo "**    This can beak h5py compilation.   **"
+        echo "**    This can break h5py compilation.  **"
         echo "**                                      **"
         echo "******************************************"
         echo "******************************************"
@@ -411,28 +411,31 @@
 
 # Now we dump all our SHA512 files out.
 
-echo '2c1933ab31246b4f4eba049d3288156e0a72f1730604e3ed7357849967cdd329e4647cf236c9442ecfb06d0aff03e6fc892a7ba2a5c1cf5c011b7ab9c619acec  Cython-0.16.tar.gz' > Cython-0.16.tar.gz.sha512
+echo 'eda1b8090e5e21e7e039ef4dd03de186a7b416df9d5a4e4422abeeb4d51383b9a6858e1ac4902d8e5010f661b295bbb2452c43c8738be668379b4eb4835d0f61  Cython-0.17.1.tar.gz' > Cython-0.17.1.tar.gz.sha512
 echo '44eea803870a66ff0bab08d13a8b3388b5578ebc1c807d1d9dca0a93e6371e91b15d02917a00b3b20dc67abb5a21dabaf9b6e9257a561f85eeff2147ac73b478  PyX-0.11.1.tar.gz' > PyX-0.11.1.tar.gz.sha512
 echo 'b981f8464575bb24c297631c87a3b9172312804a0fc14ce1fa7cb41ce2b0d2fd383cd1c816d6e10c36467d18bf9492d6faf557c81c04ff3b22debfa93f30ad0b  Python-2.7.3.tgz' > Python-2.7.3.tgz.sha512
 echo 'c017d3d59dd324ac91af0edc178c76b60a5f90fbb775cf843e39062f95bd846238f2c53705f8890ed3f34bc0e6e75671a73d13875eb0287d6201cb45f0a2d338  bzip2-1.0.5.tar.gz' > bzip2-1.0.5.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
 echo 'b519218f93946400326e9b656669269ecb3e5232b944e18fbc3eadc4fe2b56244d68aae56d6f69042b4c87c58c881ee2aaa279561ea0f0f48d5842155f4de9de  freetype-2.4.4.tar.gz' > freetype-2.4.4.tar.gz.sha512
-echo '1531789e0a77d4829796d18552a4de7aecae7e8b63763a7951a8091921995800740fe03e72a7dbd496a5590828131c5f046ddead695e5cba79343b8c205148d1  h5py-2.0.1.tar.gz' > h5py-2.0.1.tar.gz.sha512
-echo '9644896e4a84665ad22f87eb885cbd4a0c60a5c30085d5dd5dba5f3c148dbee626f0cb01e59a7995a84245448a3f1e9ba98687d3f10250e2ee763074ed8ddc0e  hdf5-1.8.7.tar.gz' > hdf5-1.8.7.tar.gz.sha512
+echo 'b3290c498191684781ca5286ab454eb1bd045e8d894f5b86fb86beb88f174e22ac3ab008fb02d6562051d9fa6a9593920cab433223f6d5473999913223b8e183  h5py-2.1.0.tar.gz' > h5py-2.1.0.tar.gz.sha512
+echo 'c68a425bacaa7441037910b9166f25b89e1387776a7749a5350793f89b1690350df5f018060c31d03686e7c3ed2aa848bd2b945c96350dc3b6322e087934783a  hdf5-1.8.9.tar.gz' > hdf5-1.8.9.tar.gz.sha512
 echo 'dbefad00fa34f4f21dca0f1e92e95bd55f1f4478fa0095dcf015b4d06f0c823ff11755cd777e507efaf1c9098b74af18f613ec9000e5c3a5cc1c7554fb5aefb8  libpng-1.5.12.tar.gz' > libpng-1.5.12.tar.gz.sha512
-echo 'f5ab95c29ef6958096970265a6079f0eb8c43a500924346c4a6c6eb89d9110eeeb6c34a53715e71240e82ded2b76a7b8d5a9b05a07baa000b2926718264ad8ff  matplotlib-1.1.0.tar.gz' > matplotlib-1.1.0.tar.gz.sha512
-echo 'ec7416729f99f5eef6700507e740552e771d6dd8863f757311538d7d67a0eecd3426381bd42a7ddbf0771bdde8bba5cb943f60031ae3567d6a3dcac738facda8  mercurial-2.2.2.tar.gz' > mercurial-2.2.2.tar.gz.sha512
+echo '5b1a0fb52dcb21ca5f0ab71c8a49550e1e8cf633552ec6598dc43f0b32c03422bf5af65b30118c163231ecdddfd40846909336f16da318959106076e80a3fad0  matplotlib-1.2.0.tar.gz' > matplotlib-1.2.0.tar.gz.sha512
+echo '52d1127de2208aaae693d16fef10ffc9b8663081bece83b7597d65706e9568af3b9e56bd211878774e1ebed92e21365ee9c49602a0ff5e48f89f12244d79c161  mercurial-2.4.tar.gz' > mercurial-2.4.tar.gz.sha512
 echo 'de3dd37f753614055dcfed910e9886e03688b8078492df3da94b1ec37be796030be93291cba09e8212fffd3e0a63b086902c3c25a996cf1439e15c5b16e014d9  numpy-1.6.1.tar.gz' > numpy-1.6.1.tar.gz.sha512
 echo '5ad681f99e75849a5ca6f439c7a19bb51abc73d121b50f4f8e4c0da42891950f30407f761a53f0fe51b370b1dbd4c4f5a480557cb2444c8c7c7d5412b328a474  sqlite-autoconf-3070500.tar.gz' > sqlite-autoconf-3070500.tar.gz.sha512
 echo 'edae735960279d92acf58e1f4095c6392a7c2059b8f1d2c46648fc608a0fb06b392db2d073f4973f5762c034ea66596e769b95b3d26ad963a086b9b2d09825f2  zlib-1.2.3.tar.bz2' > zlib-1.2.3.tar.bz2.sha512
-echo '42021737c93cea513116e6051cff9b803e3f25d6019c74370b42f4c91d1af73e94ac2b7ace85b7565fa3d45b79231079bd48a242531beeafa33c36d7139ce838  ipython-0.13.tar.gz' > ipython-0.13.tar.gz.sha512
+echo '05ac335727a2c3036f31a2506fdd2615aa436bfbe2f81799fe6c51bffe2591ad6a8427f3b25c34e7e709fb4e7607a0589dc7a22185c1f9b894e90de6711a88aa  ipython-0.13.1.tar.gz' > ipython-0.13.1.tar.gz.sha512
 echo 'fb3cf421b2dc48c31956b3e3ee4ab6ebc743deec3bf626c2238a1996c8c51be87260bd6aa662793a1f0c34dcda9b3146763777bb162dfad6fec4ca7acc403b2e  zeromq-2.2.0.tar.gz' > zeromq-2.2.0.tar.gz.sha512
 echo 'd761b492352841cdc125d9f0c99ee6d6c435812472ea234728b7f0fb4ad1048e1eec9b399df2081fbc926566f333f7780fedd0ce23255a6633fe5c60ed15a6af  pyzmq-2.1.11.tar.gz' > pyzmq-2.1.11.tar.gz.sha512
 echo '57fa5e57dfb98154a42d2d477f29401c2260ae7ad3a8128a4098b42ee3b35c54367b1a3254bc76b9b3b14b4aab7c3e1135858f68abc5636daedf2f01f9b8a3cf  tornado-2.2.tar.gz' > tornado-2.2.tar.gz.sha512
 echo '1332e3d5465ca249c357314cf15d2a4e5e83a941841021b8f6a17a107dce268a7a082838ade5e8db944ecde6bfb111211ab218aa414ee90aafbb81f1491b3b93  Forthon-0.8.10.tar.gz' > Forthon-0.8.10.tar.gz.sha512
+echo 'c13116c1f0547000cc565e15774687b9e884f8b74fb62a84e578408a868a84961704839065ae4f21b662e87f2aaedf6ea424ea58dfa9d3d73c06281f806d15dd  nose-1.2.1.tar.gz' > nose-1.2.1.tar.gz.sha512
+echo '73de2c99406a38f85273931597525cec4ebef55b93712adca3b0bfea8ca3fc99446e5d6495817e9ad55cf4d48feb7fb49734675c4cc8938db8d4a5225d30eca7  python-hglib-0.2.tar.gz' > python-hglib-0.2.tar.gz.sha512
+echo 'ffc602eb346717286b3d0a6770c60b03b578b3cf70ebd12f9e8b1c8c39cdb12ef219ddaa041d7929351a6b02dbb8caf1821b5452d95aae95034cbf4bc9904a7a  sympy-0.7.2.tar.gz' > sympy-0.7.2.tar.gz.sha512
 
 # Individual processes
-[ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.7.tar.gz
+[ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.9.tar.gz
 [ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.3.tar.bz2 
 [ $INST_BZLIB -eq 1 ] && get_ytproject bzip2-1.0.5.tar.gz
 [ $INST_PNG -eq 1 ] && get_ytproject libpng-1.5.12.tar.gz
@@ -444,14 +447,16 @@
 [ $INST_0MQ -eq 1 ] && get_ytproject tornado-2.2.tar.gz
 get_ytproject Python-2.7.3.tgz
 get_ytproject numpy-1.6.1.tar.gz
-get_ytproject matplotlib-1.1.0.tar.gz
-get_ytproject mercurial-2.2.2.tar.gz
-get_ytproject ipython-0.13.tar.gz
-get_ytproject h5py-2.0.1.tar.gz
-get_ytproject Cython-0.16.tar.gz
+get_ytproject matplotlib-1.2.0.tar.gz
+get_ytproject mercurial-2.4.tar.gz
+get_ytproject ipython-0.13.1.tar.gz
+get_ytproject h5py-2.1.0.tar.gz
+get_ytproject Cython-0.17.1.tar.gz
 get_ytproject reason-js-20120623.zip
 get_ytproject Forthon-0.8.10.tar.gz
-
+get_ytproject nose-1.2.1.tar.gz 
+get_ytproject python-hglib-0.2.tar.gz
+get_ytproject sympy-0.7.2.tar.gz
 if [ $INST_BZLIB -eq 1 ]
 then
     if [ ! -e bzip2-1.0.5/done ]
@@ -535,11 +540,11 @@
 
 if [ -z "$HDF5_DIR" ]
 then
-    if [ ! -e hdf5-1.8.7/done ]
+    if [ ! -e hdf5-1.8.9/done ]
     then
-        [ ! -e hdf5-1.8.7 ] && tar xfz hdf5-1.8.7.tar.gz
+        [ ! -e hdf5-1.8.9 ] && tar xfz hdf5-1.8.9.tar.gz
         echo "Installing HDF5"
-        cd hdf5-1.8.7
+        cd hdf5-1.8.9
         ( ./configure --prefix=${DEST_DIR}/ --enable-shared 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -587,7 +592,7 @@
 if [ $INST_HG -eq 1 ]
 then
     echo "Installing Mercurial."
-    do_setup_py mercurial-2.2.2
+    do_setup_py mercurial-2.4
     export HG_EXEC=${DEST_DIR}/bin/hg
 else
     # We assume that hg can be found in the path.
@@ -655,10 +660,10 @@
     echo "Setting CFLAGS ${CFLAGS}"
 fi
 # Now we set up the basedir for matplotlib:
-mkdir -p ${DEST_DIR}/src/matplotlib-1.1.0
-echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.1.0/setup.cfg
-echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.1.0/setup.cfg
-do_setup_py matplotlib-1.1.0
+mkdir -p ${DEST_DIR}/src/matplotlib-1.2.0
+echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.2.0/setup.cfg
+echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.2.0/setup.cfg
+do_setup_py matplotlib-1.2.0
 if [ -n "${OLD_LDFLAGS}" ]
 then
     export LDFLAG=${OLD_LDFLAGS}
@@ -685,10 +690,13 @@
     do_setup_py tornado-2.2
 fi
 
-do_setup_py ipython-0.13
-do_setup_py h5py-2.0.1
-do_setup_py Cython-0.16
+do_setup_py ipython-0.13.1
+do_setup_py h5py-2.1.0
+do_setup_py Cython-0.17.1
 do_setup_py Forthon-0.8.10
+do_setup_py nose-1.2.1
+do_setup_py python-hglib-0.2
+do_setup_py sympy-0.7.2
 [ $INST_PYX -eq 1 ] && do_setup_py PyX-0.11.1
 
 echo "Doing yt update, wiping local changes and updating to branch ${BRANCH}"


diff -r f5f2dee81c071efd5520ee974faf62f1dcb7ca02 -r 6812eb0787cfd146b71702c8d593fda40c507f4a yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -27,10 +27,20 @@
 import base64
 import matplotlib.figure
 from matplotlib.mathtext import MathTextParser
+from distutils import version
+import matplotlib
+
+# Some magic for dealing with pyparsing being included or not
+# included in matplotlib (not in gentoo, yes in everything else)
+# Also accounting for the fact that in 1.2.0, pyparsing got renamed.
 try:
-    from matplotlib.pyparsing import ParseFatalException
+    if version.LooseVersion(matplotlib.__version__) < version.LooseVersion("1.2.0"):
+        from matplotlib.pyparsing import ParseFatalException
+    else:
+        from matplotlib.pyparsing_py2 import ParseFatalException
 except ImportError:
     from pyparsing import ParseFatalException
+
 import cStringIO
 import types
 import __builtin__



https://bitbucket.org/yt_analysis/yt-3.0/changeset/66828be39280/
changeset:   66828be39280
branch:      yt
user:        chummels
date:        2012-11-10 05:22:19
summary:     Fixing a stupid mistake with the Enzo test existence exception.
affected #:  1 file

diff -r 1d22cb72872f253e035a9f4fc9fb4eb63025f073 -r 66828be392800156bc13974a0f32df29f62c0dde yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -25,6 +25,7 @@
 
 # We don't need to import 'exceptions'
 #import exceptions
+import os.path
 
 class YTException(Exception):
     def __init__(self, pf = None):
@@ -174,8 +175,9 @@
         return "Must have A>=B>=C"
 
 class EnzoTestOutputFileNonExistent(YTException):
-    def __init__(self, testname):
-        self.testname = testname
+    def __init__(self, filename):
+        self.filename = filename
+        self.testname = os.path.basename(os.path.dirname(filename))
 
     def __str__(self):
         return "Enzo test output file (OutputLog) not generated for: " + \
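
The new code recovers the test name from the path of the missing
OutputLog: the file lives inside the test's directory, so the name is one
path component up. For example (the path is illustrative):

    import os.path

    filename = "/scratch/enzo_tests/RadiatingShock/OutputLog"
    print(os.path.basename(os.path.dirname(filename)))   # RadiatingShock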



https://bitbucket.org/yt_analysis/yt-3.0/changeset/b6fa86f54076/
changeset:   b6fa86f54076
branch:      yt
user:        chummels
date:        2012-11-10 05:23:13
summary:     Merging.
affected #:  3 files

diff -r 66828be392800156bc13974a0f32df29f62c0dde -r b6fa86f54076af2dc6a136c695f09c0763244825 .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5152,3 +5152,6 @@
 0000000000000000000000000000000000000000 svn.993
 fff7118f00e25731ccf37cba3082b8fcb73cf90e svn.371
 0000000000000000000000000000000000000000 svn.371
+f15825659f5af3ce64aaad30062aff3603cbfb66 hop callback
+0000000000000000000000000000000000000000 hop callback
+0000000000000000000000000000000000000000 hop callback


diff -r 66828be392800156bc13974a0f32df29f62c0dde -r b6fa86f54076af2dc6a136c695f09c0763244825 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -36,6 +36,7 @@
 from yt.mods import *
 from yt.data_objects.static_output import StaticOutput
 import cPickle
+import shelve
 
 from yt.utilities.logger import disable_stream_logging
 from yt.utilities.command_line import get_yt_version
@@ -61,6 +62,8 @@
             help="The name we'll call this set of tests")
         parser.add_option("--answer-store", dest="store_results",
             default=False, action="store_true")
+        parser.add_option("--local-store", dest="store_local_results",
+            default=False)
 
     def configure(self, options, conf):
         super(AnswerTesting, self).configure(options, conf)
@@ -78,40 +81,41 @@
             self.result_storage = defaultdict(dict)
         if options.compare_name == "SKIP":
             options.compare_name = None
-        if options.compare_name is not None:
-            # Now we grab from our S3 store
-            if options.compare_name == "latest":
-                options.compare_name = _latest
+        elif options.compare_name == "latest":
+            options.compare_name = _latest
+
+        # We only either store or test.
+        if options.store_local_results == 'True':
             AnswerTestingTest.reference_storage = \
-                AnswerTestOpener(options.compare_name)
-        self.answer_name = options.this_name
+                self.storage = \
+                    AnswerTestLocalStorage("%s/%s" % \
+                        (os.path.realpath(options.output_dir), options.compare_name), not options.store_results)
+        else:
+            AnswerTestingTest.reference_storage = \
+                self.storage = AnswerTestCloudStorage(options.compare_name, not options.store_results)
+
         self.store_results = options.store_results
+        self.store_local_results = options.store_local_results
         global run_big_data
         run_big_data = options.big_data
 
-    def finalize(self, result):
-        # This is where we dump our result storage up to Amazon, if we are able
-        # to.
+    def finalize(self, result=None):
         if self.store_results is False: return
-        import boto
-        from boto.s3.key import Key
-        c = boto.connect_s3()
-        bucket = c.get_bucket("yt-answer-tests")
-        for pf_name in self.result_storage:
-            rs = cPickle.dumps(self.result_storage[pf_name])
-            tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name)) 
-            if tk is not None: tk.delete()
-            k = Key(bucket)
-            k.key = "%s_%s" % (self.answer_name, pf_name)
-            k.set_contents_from_string(rs)
-            k.set_acl("public-read")
+        self.storage.dump(self.result_storage)        
 
-class AnswerTestOpener(object):
-    def __init__(self, reference_name):
+class AnswerTestStorage(object):
+    def __init__(self, reference_name, read=True):
         self.reference_name = reference_name
         self.cache = {}
+        self.read = read
+    def dump(self, result_storage, result):
+        pass
+    def get(self, pf_name, default=None):
+        pass
 
+class AnswerTestCloudStorage(AnswerTestStorage):
     def get(self, pf_name, default = None):
+        if not self.read: return default
         if pf_name in self.cache: return self.cache[pf_name]
         url = _url_path % (self.reference_name, pf_name)
         try:
@@ -126,6 +130,47 @@
         self.cache[pf_name] = rv
         return rv
 
+    def dump(self, result_storage):
+        if self.read: return
+        # This is where we dump our result storage up to Amazon, if we are able
+        # to.
+        import boto
+        from boto.s3.key import Key
+        c = boto.connect_s3()
+        bucket = c.get_bucket("yt-answer-tests")
+        for pf_name in result_storage:
+            rs = cPickle.dumps(result_storage[pf_name])
+            tk = bucket.get_key("%s_%s" % (self.reference_name, pf_name)) 
+            if tk is not None: tk.delete()
+            k = Key(bucket)
+            k.key = "%s_%s" % (self.reference_name, pf_name)
+            k.set_contents_from_string(rs)
+            k.set_acl("public-read")
+
+class AnswerTestLocalStorage(AnswerTestStorage):
+    def dump(self, result_storage):
+        if self.read: return 
+        # Store data using shelve
+        ds = shelve.open(self.reference_name, protocol=-1)
+        for pf_name in result_storage:
+            answer_name = "%s" % pf_name
+            if answer_name in ds:
+                mylog.info("Overwriting %s", answer_name)
+            ds[answer_name] = result_storage[pf_name]
+        ds.close()
+
+    def get(self, pf_name, default=None):
+        if not self.read: return default
+        # Read data using shelve
+        answer_name = "%s" % pf_name
+        ds = shelve.open(self.reference_name, protocol=-1)
+        try:
+            result = ds[answer_name]
+        except KeyError:
+            result = default
+        ds.close()
+        return result
+
 @contextlib.contextmanager
 def temp_cwd(cwd):
     oldcwd = os.getcwd()
@@ -164,15 +209,16 @@
 
 class AnswerTestingTest(object):
     reference_storage = None
+    result_storage = None
     prefix = ""
     def __init__(self, pf_fn):
         self.pf = data_dir_load(pf_fn)
 
     def __call__(self):
         nv = self.run()
-        if self.reference_storage is not None:
+        if self.reference_storage is not None and self.reference_storage.read:
             dd = self.reference_storage.get(self.storage_name)
-            if dd is None: raise YTNoOldAnswer()
+            if dd is None: raise YTNoOldAnswer(self.storage_name)
             ov = dd[self.description]
             self.compare(nv, ov)
         else:
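
The local answer store added above (selected with --local-store=True) is
just a shelve database keyed by dataset name. A minimal sketch of the
round-trip AnswerTestLocalStorage relies on (file name hypothetical;
shelve may add its own suffix on disk):

    import shelve

    # Write one result set under a dataset name, then read it back.
    ds = shelve.open("local-answers", protocol=-1)
    ds["DD0010"] = {"FieldValues_Density": [1.0, 0.5, 2.0]}
    ds.close()

    ds = shelve.open("local-answers", protocol=-1)
    result = ds.get("DD0010")  # None when the answer is missing
    ds.close()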


diff -r 66828be392800156bc13974a0f32df29f62c0dde -r b6fa86f54076af2dc6a136c695f09c0763244825 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -279,12 +279,12 @@
     ranks = None
     available_ranks = None
     tasks = None
-    workgroups = []
     def __init__(self):
         self.comm = communication_system.communicators[-1]
         self.size = self.comm.size
         self.ranks = range(self.size)
         self.available_ranks = range(self.size)
+        self.workgroups = []
     
     def add_workgroup(self, size=None, ranks=None, name=None):
         if size is None:
@@ -296,7 +296,7 @@
             ranks = [self.available_ranks.pop(0) for i in range(size)]
         # Default name to the workgroup number.
         if name is None: 
-            name = string(len(workgroups))
+            name = str(len(self.workgroups))
         group = self.comm.comm.Get_group().Incl(ranks)
         new_comm = self.comm.comm.Create(group)
         if self.comm.rank in ranks:
@@ -304,16 +304,20 @@
         self.workgroups.append(Workgroup(len(ranks), ranks, new_comm, name))
     
     def free_workgroup(self, workgroup):
+        # If you want to actually delete the workgroup you will need to
+        # pop it out of the self.workgroups list so you don't have references
+        # that are left dangling, e.g. see free_all() below.
         for i in workgroup.ranks:
             if self.comm.rank == i:
                 communication_system.communicators.pop()
             self.available_ranks.append(i) 
-        del workgroup
         self.available_ranks.sort()
 
     def free_all(self):
         for wg in self.workgroups:
             self.free_workgroup(wg)
+        for i in range(len(self.workgroups)):
+            self.workgroups.pop(0)
 
     @classmethod
     def from_sizes(cls, sizes):
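
The workgroups change above fixes the classic mutable-class-attribute
pitfall: a list defined in the class body is shared by every instance.
A minimal illustration (names hypothetical):

    class Pool(object):
        groups = []           # class attribute: shared across instances

    a, b = Pool(), Pool()
    a.groups.append("wg0")
    print(b.groups)           # ['wg0']; b sees a's workgroup

    class FixedPool(object):
        def __init__(self):
            self.groups = []  # per-instance, as in the patch above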



https://bitbucket.org/yt_analysis/yt-3.0/changeset/5aee482e156a/
changeset:   5aee482e156a
branch:      yt
user:        chummels
date:        2012-11-10 05:25:36
summary:     Merging.
affected #:  0 files


https://bitbucket.org/yt_analysis/yt-3.0/changeset/f074ba977987/
changeset:   f074ba977987
branch:      yt
user:        brittonsmith
date:        2012-11-10 15:09:47
summary:     Adding enzo-specific testing support.
affected #:  2 files

diff -r 369da2bf6c0a8fc4f045f1abebe57382648d6f14 -r f074ba977987cb751ea14841dded187c11a4ea11 yt/frontends/enzo/answer_testing_support.py
--- /dev/null
+++ b/yt/frontends/enzo/answer_testing_support.py
@@ -0,0 +1,112 @@
+"""
+Answer Testing support for Enzo.
+
+Author: Britton Smith <brittonsmith at gmail.com>
+Affiliation: Michigan State University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Britton Smith.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.testing import *
+from yt.config import ytcfg
+from yt.mods import *
+
+from yt.utilities.answer_testing.framework import \
+     AnswerTestingTest, \
+     can_run_pf, \
+     FieldValuesTest, \
+     GridHierarchyTest, \
+     GridValuesTest, \
+     ProjectionValuesTest, \
+     ParentageRelationshipsTest, \
+     temp_cwd
+
+def requires_outputlog(path = ".", prefix = ""):
+    def ffalse(func):
+        return lambda: None
+    def ftrue(func):
+        @wraps(func)
+        def fyielder(*args, **kwargs):
+            with temp_cwd(path):
+                for t in func(*args, **kwargs):
+                    if isinstance(t, AnswerTestingTest):
+                        t.prefix = prefix
+                    yield t
+        return fyielder
+    if os.path.exists("OutputLog"):
+        return ftrue
+    with temp_cwd(path):
+        if os.path.exists("OutputLog"):
+            return ftrue
+    return ffalse
+     
+def standard_small_simulation(pf_fn, fields):
+    if not can_run_pf(pf_fn): return
+    dso = [None]
+    yield GridHierarchyTest(pf_fn)
+    yield ParentageRelationshipsTest(pf_fn)
+    for field in fields:
+        yield GridValuesTest(pf_fn, field)
+        for axis in [0, 1, 2]:
+            for ds in dso:
+                for weight_field in [None, "Density"]:
+                    yield ProjectionValuesTest(
+                        pf_fn, axis, field, weight_field,
+                        ds)
+                yield FieldValuesTest(
+                        pf_fn, field, ds)
+                    
+class ShockTubeTest(object):
+    def __init__(self, data_file, solution_file, fields, 
+                 left_edges, right_edges, rtol, atol):
+        self.solution_file = solution_file
+        self.data_file = data_file
+        self.fields = fields
+        self.left_edges = left_edges
+        self.right_edges = right_edges
+        self.rtol = rtol
+        self.atol = atol
+
+    def __call__(self):
+        # Read in the pf
+        pf = load(self.data_file)  
+        exact = self.get_analytical_solution() 
+
+        ad = pf.h.all_data()
+        position = ad['x']
+        for k in self.fields:
+            field = ad[k]
+            for xmin, xmax in zip(self.left_edges, self.right_edges):
+                mask = (position >= xmin)*(position <= xmax)
+                exact_field = np.interp(position[mask], exact['pos'], exact[k]) 
+                # yield test vs analytical solution 
+                yield assert_allclose, field[mask], exact_field, \
+                    self.rtol, self.atol
+
+    def get_analytical_solution(self):
+        # Reads in from file 
+        pos, dens, vel, pres, inte = \
+                np.loadtxt(self.solution_file, unpack=True)
+        exact = {}
+        exact['pos'] = pos
+        exact['Density'] = dens
+        exact['x-velocity'] = vel
+        exact['Pressure'] = pres
+        exact['ThermalEnergy'] = inte
+        return exact
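
A hypothetical Enzo test module wired through the helpers above (the
paths, fields, and tolerances are illustrative only):

    from yt.frontends.enzo.answer_testing_support import \
         requires_outputlog, ShockTubeTest

    @requires_outputlog(path="SodShockTube", prefix="sod")
    def test_sod():
        test = ShockTubeTest("DD0001/data0001", "analytical.txt",
                             ["Density", "x-velocity", "Pressure"],
                             left_edges=[0.0], right_edges=[1.0],
                             rtol=1.0e-5, atol=1.0e-8)
        for t in test():
            yield t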


diff -r 369da2bf6c0a8fc4f045f1abebe57382648d6f14 -r f074ba977987cb751ea14841dded187c11a4ea11 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -392,25 +392,6 @@
         for newc, oldc in zip(new_result["children"], old_result["children"]):
             assert(newp == oldp)
 
-def requires_outputlog(path = ".", prefix = ""):
-    def ffalse(func):
-        return lambda: None
-    def ftrue(func):
-        @wraps(func)
-        def fyielder(*args, **kwargs):
-            with temp_cwd(path):
-                for t in func(*args, **kwargs):
-                    if isinstance(t, AnswerTestingTest):
-                        t.prefix = prefix
-                    yield t
-        return fyielder
-    if os.path.exists("OutputLog"):
-        return ftrue
-    with temp_cwd(path):
-        if os.path.exists("OutputLog"):
-            return ftrue
-    return ffalse
-
 def requires_pf(pf_fn, big_data = False):
     def ffalse(func):
         return lambda: None
@@ -452,60 +433,3 @@
                     yield PixelizedProjectionValuesTest(
                         pf_fn, axis, field, weight_field,
                         ds)
-
-def standard_small_simulation(pf_fn, fields):
-    if not can_run_pf(pf_fn): return
-    dso = [None]
-    yield GridHierarchyTest(pf_fn)
-    yield ParentageRelationshipsTest(pf_fn)
-    for field in fields:
-        yield GridValuesTest(pf_fn, field)
-        for axis in [0, 1, 2]:
-            for ds in dso:
-                for weight_field in [None, "Density"]:
-                    yield ProjectionValuesTest(
-                        pf_fn, axis, field, weight_field,
-                        ds)
-                yield FieldValuesTest(
-                        pf_fn, field, ds)
-                    
-class ShockTubeTest(object):
-    def __init__(self, data_file, solution_file, fields, 
-                 left_edges, right_edges, rtol, atol):
-        self.solution_file = solution_file
-        self.data_file = data_file
-        self.fields = fields
-        self.left_edges = left_edges
-        self.right_edges = right_edges
-        self.rtol = rtol
-        self.atol = atol
-
-    def __call__(self):
-        # Read in the pf
-        pf = load(self.data_file)  
-        exact = self.get_analytical_solution() 
-
-        ad = pf.h.all_data()
-        position = ad['x']
-        for k in self.fields:
-            field = ad[k]
-            for xmin, xmax in zip(self.left_edges, self.right_edges):
-                mask = (position >= xmin)*(position <= xmax)
-                exact_field = np.interp(position[mask], exact['pos'], exact[k]) 
-                # yield test vs analytical solution 
-                yield assert_allclose, field[mask], exact_field, \
-                    self.rtol, self.atol
-
-    def get_analytical_solution(self):
-        # Reads in from file 
-        pos, dens, vel, pres, inte = \
-                np.loadtxt(self.solution_file, unpack=True)
-        exact = {}
-        exact['pos'] = pos
-        exact['Density'] = dens
-        exact['x-velocity'] = vel
-        exact['Pressure'] = pres
-        exact['ThermalEnergy'] = inte
-        return exact
-
-



https://bitbucket.org/yt_analysis/yt-3.0/changeset/5e3967177d65/
changeset:   5e3967177d65
branch:      yt
user:        brittonsmith
date:        2012-11-10 15:10:00
summary:     Merged.
affected #:  17 files

diff -r f074ba977987cb751ea14841dded187c11a4ea11 -r 5e3967177d655534958bfd15f7c2762111c60141 .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5152,3 +5152,6 @@
 0000000000000000000000000000000000000000 svn.993
 fff7118f00e25731ccf37cba3082b8fcb73cf90e svn.371
 0000000000000000000000000000000000000000 svn.371
+f15825659f5af3ce64aaad30062aff3603cbfb66 hop callback
+0000000000000000000000000000000000000000 hop callback
+0000000000000000000000000000000000000000 hop callback


diff -r f074ba977987cb751ea14841dded187c11a4ea11 -r 5e3967177d655534958bfd15f7c2762111c60141 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -245,7 +245,7 @@
         echo "******************************************"
         echo "**                                      **"
         echo "**    Your CFLAGS is not empty.         **"
-        echo "**    This can beak h5py compilation.   **"
+        echo "**    This can break h5py compilation.  **"
         echo "**                                      **"
         echo "******************************************"
         echo "******************************************"
@@ -411,28 +411,31 @@
 
 # Now we dump all our SHA512 files out.
 
-echo '2c1933ab31246b4f4eba049d3288156e0a72f1730604e3ed7357849967cdd329e4647cf236c9442ecfb06d0aff03e6fc892a7ba2a5c1cf5c011b7ab9c619acec  Cython-0.16.tar.gz' > Cython-0.16.tar.gz.sha512
+echo 'eda1b8090e5e21e7e039ef4dd03de186a7b416df9d5a4e4422abeeb4d51383b9a6858e1ac4902d8e5010f661b295bbb2452c43c8738be668379b4eb4835d0f61  Cython-0.17.1.tar.gz' > Cython-0.17.1.tar.gz.sha512
 echo '44eea803870a66ff0bab08d13a8b3388b5578ebc1c807d1d9dca0a93e6371e91b15d02917a00b3b20dc67abb5a21dabaf9b6e9257a561f85eeff2147ac73b478  PyX-0.11.1.tar.gz' > PyX-0.11.1.tar.gz.sha512
 echo 'b981f8464575bb24c297631c87a3b9172312804a0fc14ce1fa7cb41ce2b0d2fd383cd1c816d6e10c36467d18bf9492d6faf557c81c04ff3b22debfa93f30ad0b  Python-2.7.3.tgz' > Python-2.7.3.tgz.sha512
 echo 'c017d3d59dd324ac91af0edc178c76b60a5f90fbb775cf843e39062f95bd846238f2c53705f8890ed3f34bc0e6e75671a73d13875eb0287d6201cb45f0a2d338  bzip2-1.0.5.tar.gz' > bzip2-1.0.5.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
 echo 'b519218f93946400326e9b656669269ecb3e5232b944e18fbc3eadc4fe2b56244d68aae56d6f69042b4c87c58c881ee2aaa279561ea0f0f48d5842155f4de9de  freetype-2.4.4.tar.gz' > freetype-2.4.4.tar.gz.sha512
-echo '1531789e0a77d4829796d18552a4de7aecae7e8b63763a7951a8091921995800740fe03e72a7dbd496a5590828131c5f046ddead695e5cba79343b8c205148d1  h5py-2.0.1.tar.gz' > h5py-2.0.1.tar.gz.sha512
-echo '9644896e4a84665ad22f87eb885cbd4a0c60a5c30085d5dd5dba5f3c148dbee626f0cb01e59a7995a84245448a3f1e9ba98687d3f10250e2ee763074ed8ddc0e  hdf5-1.8.7.tar.gz' > hdf5-1.8.7.tar.gz.sha512
+echo 'b3290c498191684781ca5286ab454eb1bd045e8d894f5b86fb86beb88f174e22ac3ab008fb02d6562051d9fa6a9593920cab433223f6d5473999913223b8e183  h5py-2.1.0.tar.gz' > h5py-2.1.0.tar.gz.sha512
+echo 'c68a425bacaa7441037910b9166f25b89e1387776a7749a5350793f89b1690350df5f018060c31d03686e7c3ed2aa848bd2b945c96350dc3b6322e087934783a  hdf5-1.8.9.tar.gz' > hdf5-1.8.9.tar.gz.sha512
 echo 'dbefad00fa34f4f21dca0f1e92e95bd55f1f4478fa0095dcf015b4d06f0c823ff11755cd777e507efaf1c9098b74af18f613ec9000e5c3a5cc1c7554fb5aefb8  libpng-1.5.12.tar.gz' > libpng-1.5.12.tar.gz.sha512
-echo 'f5ab95c29ef6958096970265a6079f0eb8c43a500924346c4a6c6eb89d9110eeeb6c34a53715e71240e82ded2b76a7b8d5a9b05a07baa000b2926718264ad8ff  matplotlib-1.1.0.tar.gz' > matplotlib-1.1.0.tar.gz.sha512
-echo 'ec7416729f99f5eef6700507e740552e771d6dd8863f757311538d7d67a0eecd3426381bd42a7ddbf0771bdde8bba5cb943f60031ae3567d6a3dcac738facda8  mercurial-2.2.2.tar.gz' > mercurial-2.2.2.tar.gz.sha512
+echo '5b1a0fb52dcb21ca5f0ab71c8a49550e1e8cf633552ec6598dc43f0b32c03422bf5af65b30118c163231ecdddfd40846909336f16da318959106076e80a3fad0  matplotlib-1.2.0.tar.gz' > matplotlib-1.2.0.tar.gz.sha512
+echo '52d1127de2208aaae693d16fef10ffc9b8663081bece83b7597d65706e9568af3b9e56bd211878774e1ebed92e21365ee9c49602a0ff5e48f89f12244d79c161  mercurial-2.4.tar.gz' > mercurial-2.4.tar.gz.sha512
 echo 'de3dd37f753614055dcfed910e9886e03688b8078492df3da94b1ec37be796030be93291cba09e8212fffd3e0a63b086902c3c25a996cf1439e15c5b16e014d9  numpy-1.6.1.tar.gz' > numpy-1.6.1.tar.gz.sha512
 echo '5ad681f99e75849a5ca6f439c7a19bb51abc73d121b50f4f8e4c0da42891950f30407f761a53f0fe51b370b1dbd4c4f5a480557cb2444c8c7c7d5412b328a474  sqlite-autoconf-3070500.tar.gz' > sqlite-autoconf-3070500.tar.gz.sha512
 echo 'edae735960279d92acf58e1f4095c6392a7c2059b8f1d2c46648fc608a0fb06b392db2d073f4973f5762c034ea66596e769b95b3d26ad963a086b9b2d09825f2  zlib-1.2.3.tar.bz2' > zlib-1.2.3.tar.bz2.sha512
-echo '42021737c93cea513116e6051cff9b803e3f25d6019c74370b42f4c91d1af73e94ac2b7ace85b7565fa3d45b79231079bd48a242531beeafa33c36d7139ce838  ipython-0.13.tar.gz' > ipython-0.13.tar.gz.sha512
+echo '05ac335727a2c3036f31a2506fdd2615aa436bfbe2f81799fe6c51bffe2591ad6a8427f3b25c34e7e709fb4e7607a0589dc7a22185c1f9b894e90de6711a88aa  ipython-0.13.1.tar.gz' > ipython-0.13.1.tar.gz.sha512
 echo 'fb3cf421b2dc48c31956b3e3ee4ab6ebc743deec3bf626c2238a1996c8c51be87260bd6aa662793a1f0c34dcda9b3146763777bb162dfad6fec4ca7acc403b2e  zeromq-2.2.0.tar.gz' > zeromq-2.2.0.tar.gz.sha512
 echo 'd761b492352841cdc125d9f0c99ee6d6c435812472ea234728b7f0fb4ad1048e1eec9b399df2081fbc926566f333f7780fedd0ce23255a6633fe5c60ed15a6af  pyzmq-2.1.11.tar.gz' > pyzmq-2.1.11.tar.gz.sha512
 echo '57fa5e57dfb98154a42d2d477f29401c2260ae7ad3a8128a4098b42ee3b35c54367b1a3254bc76b9b3b14b4aab7c3e1135858f68abc5636daedf2f01f9b8a3cf  tornado-2.2.tar.gz' > tornado-2.2.tar.gz.sha512
 echo '1332e3d5465ca249c357314cf15d2a4e5e83a941841021b8f6a17a107dce268a7a082838ade5e8db944ecde6bfb111211ab218aa414ee90aafbb81f1491b3b93  Forthon-0.8.10.tar.gz' > Forthon-0.8.10.tar.gz.sha512
+echo 'c13116c1f0547000cc565e15774687b9e884f8b74fb62a84e578408a868a84961704839065ae4f21b662e87f2aaedf6ea424ea58dfa9d3d73c06281f806d15dd  nose-1.2.1.tar.gz' > nose-1.2.1.tar.gz.sha512
+echo '73de2c99406a38f85273931597525cec4ebef55b93712adca3b0bfea8ca3fc99446e5d6495817e9ad55cf4d48feb7fb49734675c4cc8938db8d4a5225d30eca7  python-hglib-0.2.tar.gz' > python-hglib-0.2.tar.gz.sha512
+echo 'ffc602eb346717286b3d0a6770c60b03b578b3cf70ebd12f9e8b1c8c39cdb12ef219ddaa041d7929351a6b02dbb8caf1821b5452d95aae95034cbf4bc9904a7a  sympy-0.7.2.tar.gz' > sympy-0.7.2.tar.gz.sha512
 
 # Individual processes
-[ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.7.tar.gz
+[ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.9.tar.gz
 [ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.3.tar.bz2 
 [ $INST_BZLIB -eq 1 ] && get_ytproject bzip2-1.0.5.tar.gz
 [ $INST_PNG -eq 1 ] && get_ytproject libpng-1.5.12.tar.gz
@@ -444,14 +447,16 @@
 [ $INST_0MQ -eq 1 ] && get_ytproject tornado-2.2.tar.gz
 get_ytproject Python-2.7.3.tgz
 get_ytproject numpy-1.6.1.tar.gz
-get_ytproject matplotlib-1.1.0.tar.gz
-get_ytproject mercurial-2.2.2.tar.gz
-get_ytproject ipython-0.13.tar.gz
-get_ytproject h5py-2.0.1.tar.gz
-get_ytproject Cython-0.16.tar.gz
+get_ytproject matplotlib-1.2.0.tar.gz
+get_ytproject mercurial-2.4.tar.gz
+get_ytproject ipython-0.13.1.tar.gz
+get_ytproject h5py-2.1.0.tar.gz
+get_ytproject Cython-0.17.1.tar.gz
 get_ytproject reason-js-20120623.zip
 get_ytproject Forthon-0.8.10.tar.gz
-
+get_ytproject nose-1.2.1.tar.gz 
+get_ytproject python-hglib-0.2.tar.gz
+get_ytproject sympy-0.7.2.tar.gz
 if [ $INST_BZLIB -eq 1 ]
 then
     if [ ! -e bzip2-1.0.5/done ]
@@ -535,11 +540,11 @@
 
 if [ -z "$HDF5_DIR" ]
 then
-    if [ ! -e hdf5-1.8.7/done ]
+    if [ ! -e hdf5-1.8.9/done ]
     then
-        [ ! -e hdf5-1.8.7 ] && tar xfz hdf5-1.8.7.tar.gz
+        [ ! -e hdf5-1.8.9 ] && tar xfz hdf5-1.8.9.tar.gz
         echo "Installing HDF5"
-        cd hdf5-1.8.7
+        cd hdf5-1.8.9
         ( ./configure --prefix=${DEST_DIR}/ --enable-shared 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -587,7 +592,7 @@
 if [ $INST_HG -eq 1 ]
 then
     echo "Installing Mercurial."
-    do_setup_py mercurial-2.2.2
+    do_setup_py mercurial-2.4
     export HG_EXEC=${DEST_DIR}/bin/hg
 else
     # We assume that hg can be found in the path.
@@ -655,10 +660,10 @@
     echo "Setting CFLAGS ${CFLAGS}"
 fi
 # Now we set up the basedir for matplotlib:
-mkdir -p ${DEST_DIR}/src/matplotlib-1.1.0
-echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.1.0/setup.cfg
-echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.1.0/setup.cfg
-do_setup_py matplotlib-1.1.0
+mkdir -p ${DEST_DIR}/src/matplotlib-1.2.0
+echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.2.0/setup.cfg
+echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.2.0/setup.cfg
+do_setup_py matplotlib-1.2.0
 if [ -n "${OLD_LDFLAGS}" ]
 then
     export LDFLAG=${OLD_LDFLAGS}
@@ -685,10 +690,13 @@
     do_setup_py tornado-2.2
 fi
 
-do_setup_py ipython-0.13
-do_setup_py h5py-2.0.1
-do_setup_py Cython-0.16
+do_setup_py ipython-0.13.1
+do_setup_py h5py-2.1.0
+do_setup_py Cython-0.17.1
 do_setup_py Forthon-0.8.10
+do_setup_py nose-1.2.1
+do_setup_py python-hglib-0.2
+do_setup_py sympy-0.7.2
 [ $INST_PYX -eq 1 ] && do_setup_py PyX-0.11.1
 
 echo "Doing yt update, wiping local changes and updating to branch ${BRANCH}"
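
The .sha512 files written earlier in the script each pair a hex digest
with a tarball name so the downloads can be verified. An equivalent
check, sketched in Python rather than shell (function name hypothetical):

    import hashlib

    def check_sha512(tarball, sha_file):
        # .sha512 files hold "<hexdigest>  <filename>", as written above.
        expected = open(sha_file).read().split()[0]
        h = hashlib.sha512()
        with open(tarball, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                h.update(chunk)
        return h.hexdigest() == expected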


diff -r f074ba977987cb751ea14841dded187c11a4ea11 -r 5e3967177d655534958bfd15f7c2762111c60141 yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -84,6 +84,7 @@
     ValidateSpatial, \
     ValidateGridType, \
     add_field, \
+    add_grad, \
     derived_field
 
 from particle_trajectories import \


diff -r f074ba977987cb751ea14841dded187c11a4ea11 -r 5e3967177d655534958bfd15f7c2762111c60141 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -58,6 +58,66 @@
                 return function
             return create_function
         self[name] = DerivedField(name, function, **kwargs)
+        
+    def add_grad(self, field, **kwargs):
+        """
+        Creates the partial derivative of a given field. This function will
+        autogenerate the names of the gradient fields.
+
+        """
+        sl = slice(2,None,None)
+        sr = slice(None,-2,None)
+        
+        def _gradx(f, data):
+            grad = data[field][sl,1:-1,1:-1] - data[field][sr,1:-1,1:-1]
+            grad /= 2.0*data["dx"].flat[0]
+            g = np.zeros(data[field].shape, dtype='float64')
+            g[1:-1,1:-1,1:-1] = grad
+            return g
+            
+        def _grady(f, data):
+            grad = data[field][1:-1,sl,1:-1] - data[field][1:-1,sr,1:-1]
+            grad /= 2.0*data["dy"].flat[0]
+            g = np.zeros(data[field].shape, dtype='float64')
+            g[1:-1,1:-1,1:-1] = grad
+            return g
+            
+        def _gradz(f, data):
+            grad = data[field][1:-1,1:-1,sl] - data[field][1:-1,1:-1,sr]
+            grad /= 2.0*data["dz"].flat[0]
+            g = np.zeros(data[field].shape, dtype='float64')
+            g[1:-1,1:-1,1:-1] = grad
+            return g
+        
+        d_kwargs = kwargs.copy()
+        if "display_name" in kwargs: del d_kwargs["display_name"]
+        
+        for ax in "xyz":
+            if "display_name" in kwargs:
+                disp_name = r"%s\_%s" % (kwargs["display_name"], ax)
+            else:
+                disp_name = r"\partial %s/\partial %s" % (field, ax)
+            name = "Grad_%s_%s" % (field, ax)
+            self[name] = DerivedField(name, function=eval('_grad%s' % ax),
+                         take_log=False, validators=[ValidateSpatial(1,[field])],
+                         display_name = disp_name, **d_kwargs)
+        
+        def _grad(f, data) :
+            a = np.power(data["Grad_%s_x" % field],2)
+            b = np.power(data["Grad_%s_y" % field],2)
+            c = np.power(data["Grad_%s_z" % field],2)
+            norm = np.sqrt(a+b+c)
+            return norm
+
+        if "display_name" in kwargs:
+            disp_name = kwargs["display_name"]
+        else:
+            disp_name = r"\Vert\nabla %s\Vert" % (field)   
+        name = "Grad_%s" % field           
+        self[name] = DerivedField(name, function=_grad, take_log=False,
+                                  display_name = disp_name, **d_kwargs)
+        mylog.info("Added new fields: Grad_%s_x, Grad_%s_y, Grad_%s_z, Grad_%s" \
+                   % (field, field, field, field))
 
     def has_key(self, key):
         # This gets used a lot
@@ -96,6 +156,7 @@
 
 FieldInfo = FieldInfoContainer()
 add_field = FieldInfo.add_field
+add_grad = FieldInfo.add_grad
 
 def derived_field(**kwargs):
     def inner_decorator(function):
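
Once exported through yt.mods (see the yt/mods.py hunk below), gradient
fields can be registered in one call. A short usage sketch (the dataset
path is hypothetical):

    from yt.mods import load, add_grad

    add_grad("Density")   # registers Grad_Density_{x,y,z} and Grad_Density
    pf = load("DD0010/data0010")
    ad = pf.h.all_data()
    print(ad["Grad_Density"].max())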


diff -r f074ba977987cb751ea14841dded187c11a4ea11 -r 5e3967177d655534958bfd15f7c2762111c60141 yt/data_objects/hierarchy.py
--- a/yt/data_objects/hierarchy.py
+++ b/yt/data_objects/hierarchy.py
@@ -382,17 +382,19 @@
         """
         Prints out (stdout) relevant information about the simulation
         """
-        header = "%3s\t%6s\t%14s" % ("level","# grids", "# cells")
+        header = "%3s\t%6s\t%14s\t%14s" % ("level","# grids", "# cells",
+                                           "# cells^3")
         print header
         print "%s" % (len(header.expandtabs())*"-")
         for level in xrange(MAXLEVEL):
             if (self.level_stats['numgrids'][level]) == 0:
                 break
-            print "% 3i\t% 6i\t% 14i" % \
+            print "% 3i\t% 6i\t% 14i\t% 14i" % \
                   (level, self.level_stats['numgrids'][level],
-                   self.level_stats['numcells'][level])
+                   self.level_stats['numcells'][level],
+                   self.level_stats['numcells'][level]**(1./3))
             dx = self.select_grids(level)[0].dds[0]
-        print "-" * 28
+        print "-" * 46
         print "   \t% 6i\t% 14i" % (self.level_stats['numgrids'].sum(), self.level_stats['numcells'].sum())
         print "\n"
         try:


diff -r f074ba977987cb751ea14841dded187c11a4ea11 -r 5e3967177d655534958bfd15f7c2762111c60141 yt/frontends/stream/api.py
--- a/yt/frontends/stream/api.py
+++ b/yt/frontends/stream/api.py
@@ -30,7 +30,8 @@
       StreamStaticOutput, \
       StreamHandler, \
       load_uniform_grid, \
-      load_amr_grids
+      load_amr_grids, \
+      refine_amr
 
 from .fields import \
       KnownStreamFields, \


diff -r f074ba977987cb751ea14841dded187c11a4ea11 -r 5e3967177d655534958bfd15f7c2762111c60141 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -44,6 +44,8 @@
     decompose_array, get_psize
 from yt.utilities.definitions import \
     mpc_conversion, sec_conversion
+from yt.utilities.flagging_methods import \
+    FlaggingGrid
 
 from .fields import \
     StreamFieldInfo, \
@@ -495,3 +497,68 @@
     for unit in mpc_conversion.keys():
         spf.units[unit] = mpc_conversion[unit] * box_in_mpc
     return spf
+
+def refine_amr(base_pf, refinement_criteria, fluid_operators, max_level,
+               callback = None):
+    r"""Given a base parameter file, repeatedly apply refinement criteria and
+    fluid operators until a maximum level is reached.
+
+    Parameters
+    ----------
+    base_pf : StaticOutput
+        This is any static output.  It can also be a stream static output, for
+        instance as returned by load_uniform_grid.
+    refinement_criteria : list of :class:`~yt.utilities.flagging_methods.FlaggingMethod`
+        These criteria will be applied in sequence to identify cells that need
+        to be refined.
+    fluid_operators : list of :class:`~yt.utilities.initial_conditions.FluidOperator`
+        These fluid operators will be applied in sequence to all resulting
+        grids.
+    max_level : int
+        The maximum level to which the data will be refined
+    callback : function, optional
+        A function that will be called at the beginning of each refinement
+        cycle, with the current parameter file.
+
+    Examples
+    --------
+    >>> domain_dims = (32, 32, 32)
+    >>> data = np.zeros(domain_dims) + 0.25
+    >>> fo = [ic.CoredSphere(0.05, 0.3, [0.7,0.4,0.75], {"Density": (0.25, 100.0)})]
+    >>> rc = [fm.flagging_method_registry["overdensity"](8.0)]
+    >>> ug = load_uniform_grid({'Density': data}, domain_dims, 1.0)
+    >>> pf = refine_amr(ug, rc, fo, 5)
+    """
+    last_gc = base_pf.h.num_grids
+    cur_gc = -1
+    pf = base_pf    
+    while pf.h.max_level < max_level and last_gc != cur_gc:
+        mylog.info("Refining another level.  Current max level: %s",
+                  pf.h.max_level)
+        last_gc = pf.h.grids.size
+        for m in fluid_operators: m.apply(pf)
+        if callback is not None: callback(pf)
+        grid_data = []
+        for g in pf.h.grids:
+            gd = dict( left_edge = g.LeftEdge,
+                       right_edge = g.RightEdge,
+                       level = g.Level,
+                       dimensions = g.ActiveDimensions )
+            for field in pf.h.field_list:
+                gd[field] = g[field]
+            grid_data.append(gd)
+            if g.Level < pf.h.max_level: continue
+            fg = FlaggingGrid(g, refinement_criteria)
+            nsg = fg.find_subgrids()
+            for sg in nsg:
+                LE = sg.left_index * g.dds
+                dims = sg.dimensions * pf.refine_by
+                grid = pf.h.smoothed_covering_grid(g.Level + 1, LE, dims)
+                gd = dict(left_edge = LE, right_edge = grid.right_edge,
+                          level = g.Level + 1, dimensions = dims)
+                for field in pf.h.field_list:
+                    gd[field] = grid[field]
+                grid_data.append(gd)
+        pf = load_amr_grids(grid_data, pf.domain_dimensions, 1.0)
+        cur_gc = pf.h.num_grids
+    return pf
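
Filling in the imports implied by the docstring example above (a sketch;
the module aliases ic and fm are as assumed there):

    import numpy as np
    import yt.utilities.initial_conditions as ic
    import yt.utilities.flagging_methods as fm
    from yt.frontends.stream.api import load_uniform_grid, refine_amr

    domain_dims = (32, 32, 32)
    data = np.zeros(domain_dims) + 0.25
    fo = [ic.CoredSphere(0.05, 0.3, [0.7, 0.4, 0.75],
                         {"Density": (0.25, 100.0)})]
    rc = [fm.flagging_method_registry["overdensity"](8.0)]
    ug = load_uniform_grid({"Density": data}, domain_dims, 1.0)
    pf = refine_amr(ug, rc, fo, max_level=5)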


diff -r f074ba977987cb751ea14841dded187c11a4ea11 -r 5e3967177d655534958bfd15f7c2762111c60141 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -58,7 +58,7 @@
 from yt.data_objects.api import \
     BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \
     data_object_registry, \
-    derived_field, add_field, FieldInfo, \
+    derived_field, add_field, add_grad, FieldInfo, \
     ValidateParameter, ValidateDataField, ValidateProperty, \
     ValidateSpatial, ValidateGridType, \
     TimeSeriesData, AnalysisTask, analysis_task, \


diff -r f074ba977987cb751ea14841dded187c11a4ea11 -r 5e3967177d655534958bfd15f7c2762111c60141 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -36,6 +36,7 @@
 from yt.mods import *
 from yt.data_objects.static_output import StaticOutput
 import cPickle
+import shelve
 
 from yt.utilities.logger import disable_stream_logging
 from yt.utilities.command_line import get_yt_version
@@ -61,6 +62,8 @@
             help="The name we'll call this set of tests")
         parser.add_option("--answer-store", dest="store_results",
             default=False, action="store_true")
+        parser.add_option("--local-store", dest="store_local_results",
+            default=False)
 
     def configure(self, options, conf):
         super(AnswerTesting, self).configure(options, conf)
@@ -78,40 +81,41 @@
             self.result_storage = defaultdict(dict)
         if options.compare_name == "SKIP":
             options.compare_name = None
-        if options.compare_name is not None:
-            # Now we grab from our S3 store
-            if options.compare_name == "latest":
-                options.compare_name = _latest
+        elif options.compare_name == "latest":
+            options.compare_name = _latest
+
+        # We either store new answers or compare against existing ones, never both.
+        if options.store_local_results == 'True':
             AnswerTestingTest.reference_storage = \
-                AnswerTestOpener(options.compare_name)
-        self.answer_name = options.this_name
+                self.storage = \
+                    AnswerTestLocalStorage("%s/%s" % \
+                        (os.path.realpath(options.output_dir), options.compare_name), not options.store_results)
+        else:
+            AnswerTestingTest.reference_storage = \
+                self.storage = AnswerTestCloudStorage(options.compare_name, not options.store_results)
+
         self.store_results = options.store_results
+        self.store_local_results = options.store_local_results
         global run_big_data
         run_big_data = options.big_data
 
-    def finalize(self, result):
-        # This is where we dump our result storage up to Amazon, if we are able
-        # to.
+    def finalize(self, result=None):
         if self.store_results is False: return
-        import boto
-        from boto.s3.key import Key
-        c = boto.connect_s3()
-        bucket = c.get_bucket("yt-answer-tests")
-        for pf_name in self.result_storage:
-            rs = cPickle.dumps(self.result_storage[pf_name])
-            tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name)) 
-            if tk is not None: tk.delete()
-            k = Key(bucket)
-            k.key = "%s_%s" % (self.answer_name, pf_name)
-            k.set_contents_from_string(rs)
-            k.set_acl("public-read")
+        self.storage.dump(self.result_storage)        
 
-class AnswerTestOpener(object):
-    def __init__(self, reference_name):
+class AnswerTestStorage(object):
+    def __init__(self, reference_name, read=True):
         self.reference_name = reference_name
         self.cache = {}
+        self.read = read
+    def dump(self, result_storage):
+        pass
+    def get(self, pf_name, default=None):
+        pass
 
+class AnswerTestCloudStorage(AnswerTestStorage):
     def get(self, pf_name, default = None):
+        if not self.read: return default
         if pf_name in self.cache: return self.cache[pf_name]
         url = _url_path % (self.reference_name, pf_name)
         try:
@@ -126,6 +130,47 @@
         self.cache[pf_name] = rv
         return rv
 
+    def dump(self, result_storage):
+        if self.read: return
+        # This is where we dump our result storage up to Amazon, if we are able
+        # to.
+        import boto
+        from boto.s3.key import Key
+        c = boto.connect_s3()
+        bucket = c.get_bucket("yt-answer-tests")
+        for pf_name in result_storage:
+            rs = cPickle.dumps(result_storage[pf_name])
+            tk = bucket.get_key("%s_%s" % (self.reference_name, pf_name)) 
+            if tk is not None: tk.delete()
+            k = Key(bucket)
+            k.key = "%s_%s" % (self.reference_name, pf_name)
+            k.set_contents_from_string(rs)
+            k.set_acl("public-read")
+
+class AnswerTestLocalStorage(AnswerTestStorage):
+    def dump(self, result_storage):
+        if self.read: return 
+        # Store data using shelve
+        ds = shelve.open(self.reference_name, protocol=-1)
+        for pf_name in result_storage:
+            answer_name = "%s" % pf_name
+            if answer_name in ds:
+                mylog.info("Overwriting %s", answer_name)
+            ds[answer_name] = result_storage[pf_name]
+        ds.close()
+
+    def get(self, pf_name, default=None):
+        if not self.read: return default
+        # Read data using shelve
+        answer_name = "%s" % pf_name
+        ds = shelve.open(self.reference_name, protocol=-1)
+        try:
+            result = ds[answer_name]
+        except KeyError:
+            result = default
+        ds.close()
+        return result
+
 @contextlib.contextmanager
 def temp_cwd(cwd):
     oldcwd = os.getcwd()
@@ -164,15 +209,16 @@
 
 class AnswerTestingTest(object):
     reference_storage = None
+    result_storage = None
     prefix = ""
     def __init__(self, pf_fn):
         self.pf = data_dir_load(pf_fn)
 
     def __call__(self):
         nv = self.run()
-        if self.reference_storage is not None:
+        if self.reference_storage is not None and self.reference_storage.read:
             dd = self.reference_storage.get(self.storage_name)
-            if dd is None: raise YTNoOldAnswer()
+            if dd is None: raise YTNoOldAnswer(self.storage_name)
             ov = dd[self.description]
             self.compare(nv, ov)
         else:


diff -r f074ba977987cb751ea14841dded187c11a4ea11 -r 5e3967177d655534958bfd15f7c2762111c60141 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -25,6 +25,7 @@
 
 # We don't need to import 'exceptions'
 #import exceptions
+import os.path
 
 class YTException(Exception):
     def __init__(self, pf = None):
@@ -172,3 +173,12 @@
 
     def __str__(self):
         return "Must have A>=B>=C"
+
+class EnzoTestOutputFileNonExistent(YTException):
+    def __init__(self, filename):
+        self.filename = filename
+        self.testname = os.path.basename(os.path.dirname(filename))
+
+    def __str__(self):
+        return "Enzo test output file (OutputLog) not generated for: " + \
+            "'%s'" % (self.testname) + ".\nTest did not complete."


diff -r f074ba977987cb751ea14841dded187c11a4ea11 -r 5e3967177d655534958bfd15f7c2762111c60141 yt/utilities/flagging_methods.py
--- a/yt/utilities/flagging_methods.py
+++ b/yt/utilities/flagging_methods.py
@@ -24,15 +24,10 @@
 """
 
 import numpy as np # For modern purposes
+from yt.utilities.lib import grow_flagging_field
 
 flagging_method_registry = {}
 
-def flag_cells(grid, methods):
-    flagged = np.zeros(grid.ActiveDimensions, dtype="bool")
-    for method in methods:
-        flagged |= method(grid)
-    return flagged
-
 class FlaggingMethod(object):
     _skip_add = False
     class __metaclass__(type):
@@ -46,6 +41,144 @@
     def __init__(self, over_density):
         self.over_density = over_density
 
-    def __call__(self, pf, grid):
-        rho = grid["Density"] / (pf.refine_by**grid.Level)
+    def __call__(self, grid):
+        rho = grid["Density"] / (grid.pf.refine_by**grid.Level)
         return (rho > self.over_density)
+
+class FlaggingGrid(object):
+    def __init__(self, grid, methods):
+        self.grid = grid
+        flagged = np.zeros(grid.ActiveDimensions, dtype="bool")
+        for method in methods:
+            flagged |= method(self.grid)
+        self.flagged = grow_flagging_field(flagged)
+        self.subgrids = []
+        self.left_index = grid.get_global_startindex()
+        self.dimensions = grid.ActiveDimensions.copy()
+
+    def find_subgrids(self):
+        if not np.any(self.flagged): return []
+        psg = ProtoSubgrid(self.flagged, self.left_index, self.dimensions)
+        sgl = [psg]
+        index = 0
+        while index < len(sgl):
+            psg = sgl[index]
+            psg.shrink()
+            if psg.dimensions.prod() == 0:
+                sgl[index] = None
+                continue
+            while not psg.acceptable:
+                new_psgs = []
+                for i, dim in enumerate(np.argsort(psg.dimensions)[::-1]):
+                    new_psgs = psg.find_by_zero_signature(dim)
+                    if len(new_psgs) > 1:
+                        break
+                if len(new_psgs) <= 1:
+                    new_psgs = psg.find_by_second_derivative()
+                psg = new_psgs[0]
+                sgl[index] = psg 
+                sgl.extend(new_psgs[1:])
+                psg.shrink()
+            index += 1
+        return sgl
+
+
+# Much or most of this is directly translated from Enzo
+class ProtoSubgrid(object):
+
+    def __init__(self, flagged_base, left_index, dimensions, offset = (0,0,0)):
+        self.left_index = left_index.copy()
+        self.dimensions = dimensions.copy()
+        self.flagged = flagged_base[offset[0]:offset[0]+dimensions[0],
+                                    offset[1]:offset[1]+dimensions[1],
+                                    offset[2]:offset[2]+dimensions[2]]
+        self.compute_signatures()
+
+    def compute_signatures(self):
+        self.sigs = []
+        for dim in range(3):
+            d1 = (dim + 1) % 3
+            d2 = (dim == 0)
+            self.sigs.append(self.flagged.sum(axis=d1).sum(axis=d2))
+
+    @property
+    def acceptable(self):
+        return float(self.flagged.sum()) / self.flagged.size > 0.2
+
+    def shrink(self):
+        new_ind = []
+        for dim in range(3):
+            sig = self.sigs[dim]
+            new_start = 0
+            while sig[new_start] == 0:
+                new_start += 1
+            new_end = sig.size 
+            while sig[new_end - 1] == 0:
+                new_end -= 1
+            self.dimensions[dim] = new_end - new_start
+            self.left_index[dim] += new_start
+            new_ind.append((new_start, new_end))
+        self.flagged = self.flagged[new_ind[0][0]:new_ind[0][1],
+                                    new_ind[1][0]:new_ind[1][1],
+                                    new_ind[2][0]:new_ind[2][1]]
+        self.compute_signatures()
+
+    def find_by_zero_signature(self, dim):
+        sig = self.sigs[dim]
+        grid_ends = np.zeros((sig.size, 2))
+        ng = 0
+        i = 0
+        while i < sig.size:
+            if sig[i] != 0:
+                grid_ends[ng, 0] = i
+                while i < sig.size and sig[i] != 0:
+                    i += 1
+                grid_ends[ng, 1] = i - 1
+                ng += 1
+            i += 1
+        new_grids = []
+        for si, ei in grid_ends[:ng,:]:
+            li = self.left_index.copy()
+            dims = self.dimensions.copy()
+            li[dim] += si
+            dims[dim] = ei - si
+            offset = [0,0,0]
+            offset[dim] = si
+            new_grids.append(ProtoSubgrid(self.flagged, li, dims, offset))
+        return new_grids
+
+    def find_by_second_derivative(self):
+        max_strength = 0
+        max_axis = -1
+        max_ind = -1
+        for dim in range(3):
+            sig = self.sigs[dim]
+            sd = sig[:-2] - 2.0*sig[1:-1] + sig[2:]
+            grid_ends = np.zeros((sig.size, 2))
+            ng = 0
+            center = int((self.flagged.shape[dim] - 1) / 2)
+            strength = zero_strength = 0
+            for i in range(1, sig.size-2):
+                # Note that sd is offset by one
+                if sd[i-1] * sd[i] < 0:
+                    strength = np.abs(sd[i-1] - sd[i])
+                    if strength > zero_strength or \
+                       (strength == zero_strength and np.abs(center - i) < np.abs(zero_cross -i )):
+                        zero_strength = strength
+                        zero_cross = i
+            if zero_strength > max_strength:
+                max_strength = zero_strength
+                max_axis = dim
+                max_ind = zero_cross
+        dims = self.dimensions.copy()
+        li = self.left_index.copy()
+        dims[max_axis] = max_ind
+        psg1 = ProtoSubgrid(self.flagged, li, dims)
+        li[max_axis] += max_ind
+        dims[max_axis] = self.dimensions[max_axis] - max_ind
+        offset = np.zeros(3)
+        offset[max_axis] = max_ind
+        psg2 = ProtoSubgrid(self.flagged, li, dims, offset)
+        return [psg1, psg2]
+
+    def __str__(self):
+        return "LI: (%s) DIMS: (%s)" % (self.left_index, self.dimensions)
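
The signatures driving the splitting above are one-dimensional
projections of the flagged field, much as in Enzo's grid generator. A
tiny numpy illustration of what compute_signatures produces:

    import numpy as np

    flagged = np.zeros((4, 4, 4), dtype=bool)
    flagged[1:3, 1:3, 1:3] = True   # a flagged 2x2x2 core

    # Collapse the other two axes to get one signature per axis.
    sigs = [flagged.sum(axis=(d + 1) % 3).sum(axis=int(d == 0))
            for d in range(3)]
    print(sigs[0])                  # [0 4 4 0]; shrink() trims the zeros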


diff -r f074ba977987cb751ea14841dded187c11a4ea11 -r 5e3967177d655534958bfd15f7c2762111c60141 yt/utilities/initial_conditions.py
--- /dev/null
+++ b/yt/utilities/initial_conditions.py
@@ -0,0 +1,80 @@
+"""
+Painting zones in a grid
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+
+class FluidOperator(object):
+    def apply(self, pf):
+        for g in pf.h.grids: self(g)
+
+class TopHatSphere(FluidOperator):
+    def __init__(self, radius, center, fields):
+        self.radius = radius
+        self.center = center
+        self.fields = fields
+        
+    def __call__(self, grid, sub_select = None):
+        r = np.zeros(grid.ActiveDimensions, dtype="float64")
+        for i, ax in enumerate("xyz"):
+            np.add(r, (grid[ax] - self.center[i])**2.0, r)
+        np.sqrt(r, r)
+        ind = (r <= self.radius)
+        if sub_select is not None:
+            ind &= sub_select
+        for field, val in self.fields.iteritems():
+            grid[field][ind] = val
+
+class CoredSphere(FluidOperator):
+    def __init__(self, core_radius, radius, center, fields):
+        self.radius = radius
+        self.center = center
+        self.fields = fields
+        self.core_radius = core_radius
+
+    def __call__(self, grid, sub_select = None):
+        r = np.zeros(grid.ActiveDimensions, dtype="float64")
+        r2 = self.radius**2
+        cr2 = self.core_radius**2
+        for i, ax in enumerate("xyz"):
+            np.add(r, (grid[ax] - self.center[i])**2.0, r)
+        np.maximum(r, cr2, r)
+        ind = (r <= r2)
+        if sub_select is not None:
+            ind &= sub_select
+        for field, (outer_val, inner_val) in self.fields.iteritems():
+            val = ((r[ind] - cr2) / (r2 - cr2))**0.5 * (outer_val - inner_val)
+            grid[field][ind] = val + inner_val
+
+class RandomFluctuation(FluidOperator):
+    def __init__(self, fields):
+        self.fields = fields
+
+    def __call__(self, grid, sub_select = None):
+        if sub_select is None:
+            sub_select = Ellipsis
+        for field, mag in self.fields.iteritems():
+            vals = grid[field][sub_select]
+            rc = 1.0 + (np.random.random(vals.shape) - 0.5) * mag
+            grid[field][sub_select] *= rc
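
These operators apply themselves grid-by-grid to any loaded pf. A
hypothetical use on an in-memory stream dataset:

    import numpy as np
    import yt.utilities.initial_conditions as ic
    from yt.frontends.stream.api import load_uniform_grid

    data = {"Density": np.ones((64, 64, 64))}
    pf = load_uniform_grid(data, (64, 64, 64), 1.0)

    # Drop a dense top-hat sphere into the box, then perturb it slightly.
    ic.TopHatSphere(0.1, [0.5, 0.5, 0.5], {"Density": 50.0}).apply(pf)
    ic.RandomFluctuation({"Density": 0.01}).apply(pf)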


diff -r f074ba977987cb751ea14841dded187c11a4ea11 -r 5e3967177d655534958bfd15f7c2762111c60141 yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -291,3 +291,25 @@
     # Return out unique values
     return best_dim, split, less_ids.view("bool"), greater_ids.view("bool")
 
+
+def grow_flagging_field(oofield):
+    cdef np.ndarray[np.uint8_t, ndim=3] ofield = oofield.astype("uint8")
+    cdef np.ndarray[np.uint8_t, ndim=3] nfield
+    nfield = np.zeros_like(ofield)
+    cdef int i, j, k, ni, nj, nk
+    cdef int oi, oj, ok
+    for ni in range(ofield.shape[0]):
+        for nj in range(ofield.shape[1]):
+            for nk in range(ofield.shape[2]):
+                for oi in range(3):
+                    i = ni + (oi - 1)
+                    if i < 0 or i >= ofield.shape[0]: continue
+                    for oj in range(3):
+                        j = nj + (oj - 1)
+                        if j < 0 or j >= ofield.shape[1]: continue
+                        for ok in range(3):
+                            k = nk + (ok - 1)
+                            if k < 0 or k >= ofield.shape[2]: continue
+                            if ofield[i, j, k] == 1:
+                                nfield[ni, nj, nk] = 1
+    return nfield.astype("bool")
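
grow_flagging_field dilates the flagged region by one zone in every
direction (an OR over each cell's 3x3x3 neighborhood). An equivalent,
slower pure-numpy sketch of what the Cython loop computes:

    import numpy as np

    def grow_flagging_field_py(ofield):
        ofield = np.asarray(ofield, dtype=bool)
        padded = np.zeros(np.array(ofield.shape) + 2, dtype=bool)
        padded[1:-1, 1:-1, 1:-1] = ofield
        nfield = np.zeros_like(ofield)
        nx, ny, nz = ofield.shape
        for oi in (-1, 0, 1):
            for oj in (-1, 0, 1):
                for ok in (-1, 0, 1):
                    # OR in the neighbor shifted by (oi, oj, ok)
                    nfield |= padded[1+oi:nx+1+oi,
                                     1+oj:ny+1+oj,
                                     1+ok:nz+1+ok]
        return nfield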


diff -r f074ba977987cb751ea14841dded187c11a4ea11 -r 5e3967177d655534958bfd15f7c2762111c60141 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -279,12 +279,12 @@
     ranks = None
     available_ranks = None
     tasks = None
-    workgroups = []
     def __init__(self):
         self.comm = communication_system.communicators[-1]
         self.size = self.comm.size
         self.ranks = range(self.size)
         self.available_ranks = range(self.size)
+        self.workgroups = []
     
     def add_workgroup(self, size=None, ranks=None, name=None):
         if size is None:
@@ -296,7 +296,7 @@
             ranks = [self.available_ranks.pop(0) for i in range(size)]
         # Default name to the workgroup number.
         if name is None: 
-            name = string(len(workgroups))
+            name = str(len(self.workgroups))
         group = self.comm.comm.Get_group().Incl(ranks)
         new_comm = self.comm.comm.Create(group)
         if self.comm.rank in ranks:
@@ -304,16 +304,20 @@
         self.workgroups.append(Workgroup(len(ranks), ranks, new_comm, name))
     
     def free_workgroup(self, workgroup):
+        # If you want to actually delete the workgroup you will need to
+        # pop it out of the self.workgroups list so you don't have references
+        # that are left dangling, e.g. see free_all() below.
         for i in workgroup.ranks:
             if self.comm.rank == i:
                 communication_system.communicators.pop()
             self.available_ranks.append(i) 
-        del workgroup
         self.available_ranks.sort()
 
     def free_all(self):
         for wg in self.workgroups:
             self.free_workgroup(wg)
+        for i in range(len(self.workgroups)):
+            self.workgroups.pop(0)
 
     @classmethod
     def from_sizes(cls, sizes):


diff -r f074ba977987cb751ea14841dded187c11a4ea11 -r 5e3967177d655534958bfd15f7c2762111c60141 yt/utilities/tests/test_flagging_methods.py
--- a/yt/utilities/tests/test_flagging_methods.py
+++ b/yt/utilities/tests/test_flagging_methods.py
@@ -9,4 +9,4 @@
 def test_over_density():
     od_flag = flagging_method_registry["overdensity"](0.75) 
     criterion = (pf.h.grids[0]["Density"] > 0.75)
-    assert( np.all( od_flag(pf, pf.h.grids[0]) == criterion) )
+    assert( np.all( od_flag(pf.h.grids[0]) == criterion) )


diff -r f074ba977987cb751ea14841dded187c11a4ea11 -r 5e3967177d655534958bfd15f7c2762111c60141 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -633,6 +633,9 @@
         y0, y1 = plot.ylim
         xx0, xx1 = plot._axes.get_xlim()
         yy0, yy1 = plot._axes.get_ylim()
+
+        extent = [xx0,xx1,yy0,yy1]
+
         plot._axes.hold(True)
 
         px_index = x_dict[plot.data.axis]
@@ -662,7 +665,7 @@
                              (x0, x1, y0, y1), 0).transpose()
             buff = np.maximum(temp, buff)
         self.rv = plot._axes.contour(buff, len(self.clumps)+1,
-                                     **self.plot_args)
+                                     extent=extent,**self.plot_args)
         plot._axes.hold(False)
 
 class ArrowCallback(PlotCallback):


diff -r f074ba977987cb751ea14841dded187c11a4ea11 -r 5e3967177d655534958bfd15f7c2762111c60141 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -27,10 +27,20 @@
 import base64
 import matplotlib.figure
 from matplotlib.mathtext import MathTextParser
+from distutils import version
+import matplotlib
+
+# Some magic for dealing with pyparsing being included or not
+# included in matplotlib (not in gentoo, yes in everything else)
+# Also accounting for the fact that in 1.2.0, pyparsing got renamed.
 try:
-    from matplotlib.pyparsing import ParseFatalException
+    if version.LooseVersion(matplotlib.__version__) < version.LooseVersion("1.2.0"):
+        from matplotlib.pyparsing import ParseFatalException
+    else:
+        from matplotlib.pyparsing_py2 import ParseFatalException
 except ImportError:
     from pyparsing import ParseFatalException
+
 import cStringIO
 import types
 import __builtin__
@@ -213,7 +223,7 @@
     _vector_info = None
     _frb = None
     def __init__(self, data_source, bounds, buff_size=(800,800), antialias=True, 
-                 periodic=True, origin='center-window', oblique=False):
+                 periodic=True, origin='center-window', oblique=False, fontsize=15):
         r"""
         PlotWindow(data_source, bounds, buff_size=(800,800), antialias = True)
         
@@ -257,6 +267,7 @@
         self.antialias = True
         self.set_window(bounds) # this automatically updates the data and plot
         self.origin = origin
+        self.fontsize = fontsize
         if self.data_source.center is not None and oblique == False:
             center = [self.data_source.center[i] for i in range(len(self.data_source.center)) if i != self.data_source.axis]
             self.set_center(center)
@@ -802,8 +813,10 @@
                 labels = [r'$\rm{Image\/x'+axes_unit_label+'}$',
                           r'$\rm{Image\/y'+axes_unit_label+'}$']
 
-            self.plots[f].axes.set_xlabel(labels[0])
-            self.plots[f].axes.set_ylabel(labels[1])
+            self.plots[f].axes.set_xlabel(labels[0],fontsize=self.fontsize)
+            self.plots[f].axes.set_ylabel(labels[1],fontsize=self.fontsize)
+
+            self.plots[f].axes.tick_params(labelsize=self.fontsize)
 
             field_name = self.data_source.pf.field_info[f].display_name
 
@@ -827,7 +840,9 @@
                     raise YTCannotParseUnitDisplayName(f, md['units'],str(err))
                 label = field_name+r'$\/\/('+md['units']+r')$'
 
-            self.plots[f].cb.set_label(label)
+            self.plots[f].cb.set_label(label,fontsize=self.fontsize)
+
+            self.plots[f].cb.ax.tick_params(labelsize=self.fontsize)
 
             self.run_callbacks(f)
 
@@ -954,7 +969,7 @@
     _frb_generator = FixedResolutionBuffer
 
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
-                 origin='center-window'):
+                 origin='center-window', fontsize=15):
         r"""Creates a slice plot from a parameter file
         
         Given a pf object, an axis to slice along, and a field name
@@ -1010,6 +1025,8 @@
              to the bottom-left hand corner of the simulation domain, 'center-domain',
              corresponding the center of the simulation domain, or 'center-window' for 
              the center of the plot window.
+        fontsize : integer
+             The size of the fonts for the axis, colorbar, and tick labels.
              
         Examples
         --------
@@ -1036,7 +1053,7 @@
     _frb_generator = FixedResolutionBuffer
 
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
-                 weight_field=None, max_level=None, origin='center-window'):
+                 weight_field=None, max_level=None, origin='center-window', fontsize=15):
         r"""Creates a projection plot from a parameter file
         
         Given a pf object, an axis to project along, and a field name
@@ -1096,6 +1113,8 @@
             The name of the weighting field.  Set to None for no weight.
         max_level: int
             The maximum level to project to.
+        fontsize : integer
+             The size of the fonts for the axis, colorbar, and tick labels.
         
         Examples
         --------
@@ -1121,7 +1140,7 @@
     _frb_generator = ObliqueFixedResolutionBuffer
 
     def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
-                 axes_unit=None, north_vector=None):
+                 axes_unit=None, north_vector=None, fontsize=15):
         r"""Creates an off axis slice plot from a parameter file
 
         Given a pf object, a normal vector defining a slicing plane, and
@@ -1158,7 +1177,8 @@
             A vector defining the 'up' direction in the plot.  This
             option sets the orientation of the slicing plane.  If not
             set, an arbitrary grid-aligned north-vector is chosen.
-
+        fontsize : integer
+             The size of the fonts for the axis, colorbar, and tick labels.
         """
         (bounds,center_rot) = GetOffAxisBoundsAndCenter(normal,center,width,pf)
         cutting = pf.h.cutting(normal,center,fields=fields,north_vector=north_vector)
@@ -1197,7 +1217,7 @@
     def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
                  depth=(1,'unitary'), axes_unit=None, weight_field=None, 
                  max_level=None, north_vector=None, volume=None, no_ghost=False, 
-                 le=None, re=None, interpolated=False):
+                 le=None, re=None, interpolated=False, fontsize=15):
         r"""Creates an off axis projection plot from a parameter file
 
         Given a pf object, a normal vector to project along, and



https://bitbucket.org/yt_analysis/yt-3.0/changeset/66da587187b8/
changeset:   66da587187b8
branch:      yt
user:        MatthewTurk
date:        2012-11-10 19:51:57
summary:     Don't allow can_run_pf and data_dir_load to run if the path does not exist.
affected #:  1 file

diff -r 5e3967177d655534958bfd15f7c2762111c60141 -r 66da587187b86e19ae50146341e51fe61d276899 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -180,6 +180,8 @@
 
 def can_run_pf(pf_fn):
     path = ytcfg.get("yt", "test_data_dir")
+    if not os.path.isdir(path):
+        retrun False
     if isinstance(pf_fn, StaticOutput):
         return AnswerTestingTest.result_storage is not None
     with temp_cwd(path):
@@ -191,6 +193,8 @@
 
 def data_dir_load(pf_fn):
     path = ytcfg.get("yt", "test_data_dir")
+    if not os.path.isdir(path):
+        retrun False
     if isinstance(pf_fn, StaticOutput): return pf_fn
     with temp_cwd(path):
         pf = load(pf_fn)



https://bitbucket.org/yt_analysis/yt-3.0/changeset/7d852e905478/
changeset:   7d852e905478
branch:      yt
user:        MatthewTurk
date:        2012-11-10 19:53:36
summary:     Typo fix.  This fixes syntax.
affected #:  1 file

diff -r 66da587187b86e19ae50146341e51fe61d276899 -r 7d852e9054786462343c25865713771dcefbc52a yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -181,7 +181,7 @@
 def can_run_pf(pf_fn):
     path = ytcfg.get("yt", "test_data_dir")
     if not os.path.isdir(path):
-        retrun False
+        return False
     if isinstance(pf_fn, StaticOutput):
         return AnswerTestingTest.result_storage is not None
     with temp_cwd(path):
@@ -194,7 +194,7 @@
 def data_dir_load(pf_fn):
     path = ytcfg.get("yt", "test_data_dir")
     if not os.path.isdir(path):
-        retrun False
+        return False
     if isinstance(pf_fn, StaticOutput): return pf_fn
     with temp_cwd(path):
         pf = load(pf_fn)

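With the return spelled correctly, both helpers now degrade gracefully on machines without a test-data checkout; a minimal sketch, using a hypothetical dataset path:

    >>> from yt.utilities.answer_testing.framework import can_run_pf
    >>> # with no valid "test_data_dir" in the yt config this short-circuits:
    >>> can_run_pf("IsolatedGalaxy/galaxy0030/galaxy0030")
    False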


https://bitbucket.org/yt_analysis/yt-3.0/changeset/81e081f65311/
changeset:   81e081f65311
branch:      yt
user:        brittonsmith
date:        2012-11-10 23:52:49
summary:     Adding error messages and optional relative comparison for some tests.
affected #:  2 files

diff -r 5e3967177d655534958bfd15f7c2762111c60141 -r 81e081f653117710b1b1653e24387ac229adbd8d yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -29,7 +29,7 @@
     assert_array_less, assert_string_equal, assert_array_almost_equal_nulp,\
     assert_allclose
 
-def assert_rel_equal(a1, a2, decimals):
+def assert_rel_equal(a1, a2, decimals, err_msg=''):
     # We have nan checks in here because occasionally we have fields that get
     # weighted without non-zero weights.  I'm looking at you, particle fields!
     if isinstance(a1, np.ndarray):
@@ -39,7 +39,7 @@
         a2[np.isnan(a2)] = 1.0
     elif np.isnan(a1) and np.isnan(a2):
         return True
-    return assert_almost_equal(a1/a2, 1.0, decimals)
+    return assert_almost_equal(a1/a2, 1.0, decimals, err_msg=err_msg)
 
 def amrspace(extent, levels=7, cells=8):
     """Creates two numpy arrays representing the left and right bounds of 


diff -r 5e3967177d655534958bfd15f7c2762111c60141 -r 81e081f653117710b1b1653e24387ac229adbd8d yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -280,10 +280,12 @@
     _type_name = "FieldValues"
     _attrs = ("field", )
 
-    def __init__(self, pf_fn, field, obj_type = None):
+    def __init__(self, pf_fn, field, obj_type = None,
+                 decimals = None):
         super(FieldValuesTest, self).__init__(pf_fn)
         self.obj_type = obj_type
         self.field = field
+        self.decimals = decimals
 
     def run(self):
         obj = self.create_obj(self.pf, self.obj_type)
@@ -293,19 +295,26 @@
         return np.array([avg, mi, ma])
 
     def compare(self, new_result, old_result):
-        assert_equal(new_result, old_result)
+        err_msg = "Field values for %s not equal." % self.field
+        if self.decimals is None:
+            assert_equal(new_result, old_result, 
+                         err_msg=err_msg, verbose=True)
+        else:
+            assert_rel_equal(new_result, old_result, self.decimals,
+                             err_msg=err_msg, verbose=True)
 
 class ProjectionValuesTest(AnswerTestingTest):
     _type_name = "ProjectionValues"
     _attrs = ("field", "axis", "weight_field")
 
     def __init__(self, pf_fn, axis, field, weight_field = None,
-                 obj_type = None):
+                 obj_type = None, decimals = None):
         super(ProjectionValuesTest, self).__init__(pf_fn)
         self.axis = axis
         self.field = field
         self.weight_field = field
         self.obj_type = obj_type
+        self.decimals = decimals
 
     def run(self):
         if self.obj_type is not None:
@@ -322,7 +331,14 @@
         for k in new_result:
             assert (k in old_result)
         for k in new_result:
-            assert_equal(new_result[k], old_result[k])
+            err_msg = "%s values of %s (%s weighted) projection (axis %s) not equal." % \
+              (k, self.field, self.weight_field, self.axis)
+            if self.decimals is None:
+                assert_equal(new_result[k], old_result[k],
+                             err_msg=err_msg)
+            else:
+                assert_rel_equal(new_result[k], old_result[k], 
+                                 self.decimals, err_msg=err_msg)
 
 class PixelizedProjectionValuesTest(AnswerTestingTest):
     _type_name = "PixelizedProjectionValues"
@@ -393,9 +409,13 @@
         return result
 
     def compare(self, new_result, old_result):
-        assert_equal(len(new_result), len(old_result))
+        assert_equal(len(new_result), len(old_result),
+                     err_msg="Number of outputs not equal.",
+                     verbose=True)
         for i in range(len(new_result)):
-            assert_equal(new_result[i], old_result[i])
+            assert_equal(new_result[i], old_result[i],
+                         err_msg="Output times not equal.",
+                         verbose=True)
         
 class GridHierarchyTest(AnswerTestingTest):
     _type_name = "GridHierarchy"


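A minimal sketch of the widened assert_rel_equal signature (the arrays are made up): agreement to 8 decimals in the ratio a1/a2 passes, and err_msg is forwarded to assert_almost_equal:

    >>> import numpy as np
    >>> from yt.testing import assert_rel_equal
    >>> a = np.array([1.0, 2.0, 4.0])
    >>> assert_rel_equal(a, a * (1.0 + 1.0e-10), 8,
    ...                  err_msg="Field values for Density not equal.")

The test classes expose the tolerance directly, e.g. FieldValuesTest(pf_fn, "Density", decimals=8); with decimals left at None the old exact assert_equal path is used.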

https://bitbucket.org/yt_analysis/yt-3.0/changeset/1050f4f6d8ea/
changeset:   1050f4f6d8ea
branch:      yt
user:        brittonsmith
date:        2012-11-10 23:54:18
summary:     Merged.
affected #:  1 file

diff -r 81e081f653117710b1b1653e24387ac229adbd8d -r 1050f4f6d8ea85aaecfbb56fc931a7c97f188719 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -180,6 +180,8 @@
 
 def can_run_pf(pf_fn):
     path = ytcfg.get("yt", "test_data_dir")
+    if not os.path.isdir(path):
+        return False
     if isinstance(pf_fn, StaticOutput):
         return AnswerTestingTest.result_storage is not None
     with temp_cwd(path):
@@ -191,6 +193,8 @@
 
 def data_dir_load(pf_fn):
     path = ytcfg.get("yt", "test_data_dir")
+    if not os.path.isdir(path):
+        return False
     if isinstance(pf_fn, StaticOutput): return pf_fn
     with temp_cwd(path):
         pf = load(pf_fn)



https://bitbucket.org/yt_analysis/yt-3.0/changeset/87c2a8e4af57/
changeset:   87c2a8e4af57
branch:      yt
user:        brittonsmith
date:        2012-11-11 01:12:50
summary:     Adding test for all values of a field in a dataset.
affected #:  1 file

diff -r 1050f4f6d8ea85aaecfbb56fc931a7c97f188719 -r 87c2a8e4af57ee467f65296f8c922e26d7246446 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -307,6 +307,30 @@
             assert_rel_equal(new_result, old_result, self.decimals,
                              err_msg=err_msg, verbose=True)
 
+class AllFieldValuesTest(AnswerTestingTest):
+    _type_name = "AllFieldValues"
+    _attrs = ("field", )
+
+    def __init__(self, pf_fn, field, obj_type = None,
+                 decimals = None):
+        super(AllFieldValuesTest, self).__init__(pf_fn)
+        self.obj_type = obj_type
+        self.field = field
+        self.decimals = decimals
+
+    def run(self):
+        obj = self.create_obj(self.pf, self.obj_type)
+        return obj[self.field]
+
+    def compare(self, new_result, old_result):
+        err_msg = "All field values for %s not equal." % self.field
+        if self.decimals is None:
+            assert_equal(new_result, old_result, 
+                         err_msg=err_msg, verbose=True)
+        else:
+            assert_rel_equal(new_result, old_result, self.decimals,
+                             err_msg=err_msg, verbose=True)
+            
 class ProjectionValuesTest(AnswerTestingTest):
     _type_name = "ProjectionValues"
     _attrs = ("field", "axis", "weight_field")


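Where FieldValuesTest stores only np.array([avg, mi, ma]), this new test round-trips the full field array and compares it element-wise, so it is considerably heavier on answer storage; a minimal sketch with a hypothetical dataset path:

    >>> test = AllFieldValuesTest("galaxy0030/galaxy0030", "Density", decimals=10)
    >>> arr = test.run()   # the raw obj["Density"] array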

https://bitbucket.org/yt_analysis/yt-3.0/changeset/be44666c67ee/
changeset:   be44666c67ee
branch:      yt
user:        samskillman
date:        2012-11-12 18:13:09
summary:     Make the store-local option a flag with no arguments, adding help text for it.
affected #:  1 file

diff -r 5e3967177d655534958bfd15f7c2762111c60141 -r be44666c67ee90f43e2fd7bbda3ed0645316b591 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -63,7 +63,7 @@
         parser.add_option("--answer-store", dest="store_results",
             default=False, action="store_true")
         parser.add_option("--local-store", dest="store_local_results",
-            default=False)
+            default=False, action="store_true", help="Store/Load local results?")
 
     def configure(self, options, conf):
         super(AnswerTesting, self).configure(options, conf)
@@ -85,7 +85,7 @@
             options.compare_name = _latest
 
         # We only either store or test.
-        if options.store_local_results == 'True':
+        if options.store_local_results:
             AnswerTestingTest.reference_storage = \
                 self.storage = \
                     AnswerTestLocalStorage("%s/%s" % \


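The distinction matters because optparse hands non-action option values over as strings, which is why the old code compared against 'True'; with action="store_true" the option becomes a genuine boolean flag. A standalone sketch of the corrected behaviour:

    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("--local-store", dest="store_local_results",
        default=False, action="store_true", help="Store/Load local results?")
    options, args = parser.parse_args(["--local-store"])
    assert options.store_local_results is True  # a real boolean, not the string 'True'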

https://bitbucket.org/yt_analysis/yt-3.0/changeset/1555c64e1bd7/
changeset:   1555c64e1bd7
branch:      yt
user:        samskillman
date:        2012-11-12 18:13:20
summary:     Merging
affected #:  2 files

diff -r be44666c67ee90f43e2fd7bbda3ed0645316b591 -r 1555c64e1bd743b17a0d4a8d9f03411e90e14d8e yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -29,7 +29,7 @@
     assert_array_less, assert_string_equal, assert_array_almost_equal_nulp,\
     assert_allclose
 
-def assert_rel_equal(a1, a2, decimals):
+def assert_rel_equal(a1, a2, decimals, err_msg=''):
     # We have nan checks in here because occasionally we have fields that get
     # weighted without non-zero weights.  I'm looking at you, particle fields!
     if isinstance(a1, np.ndarray):
@@ -39,7 +39,7 @@
         a2[np.isnan(a2)] = 1.0
     elif np.isnan(a1) and np.isnan(a2):
         return True
-    return assert_almost_equal(a1/a2, 1.0, decimals)
+    return assert_almost_equal(a1/a2, 1.0, decimals, err_msg=err_msg)
 
 def amrspace(extent, levels=7, cells=8):
     """Creates two numpy arrays representing the left and right bounds of 


diff -r be44666c67ee90f43e2fd7bbda3ed0645316b591 -r 1555c64e1bd743b17a0d4a8d9f03411e90e14d8e yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -180,6 +180,8 @@
 
 def can_run_pf(pf_fn):
     path = ytcfg.get("yt", "test_data_dir")
+    if not os.path.isdir(path):
+        return False
     if isinstance(pf_fn, StaticOutput):
         return AnswerTestingTest.result_storage is not None
     with temp_cwd(path):
@@ -191,6 +193,8 @@
 
 def data_dir_load(pf_fn):
     path = ytcfg.get("yt", "test_data_dir")
+    if not os.path.isdir(path):
+        return False
     if isinstance(pf_fn, StaticOutput): return pf_fn
     with temp_cwd(path):
         pf = load(pf_fn)
@@ -280,10 +284,12 @@
     _type_name = "FieldValues"
     _attrs = ("field", )
 
-    def __init__(self, pf_fn, field, obj_type = None):
+    def __init__(self, pf_fn, field, obj_type = None,
+                 decimals = None):
         super(FieldValuesTest, self).__init__(pf_fn)
         self.obj_type = obj_type
         self.field = field
+        self.decimals = decimals
 
     def run(self):
         obj = self.create_obj(self.pf, self.obj_type)
@@ -293,19 +299,50 @@
         return np.array([avg, mi, ma])
 
     def compare(self, new_result, old_result):
-        assert_equal(new_result, old_result)
+        err_msg = "Field values for %s not equal." % self.field
+        if self.decimals is None:
+            assert_equal(new_result, old_result, 
+                         err_msg=err_msg, verbose=True)
+        else:
+            assert_rel_equal(new_result, old_result, self.decimals,
+                             err_msg=err_msg, verbose=True)
 
+class AllFieldValuesTest(AnswerTestingTest):
+    _type_name = "AllFieldValues"
+    _attrs = ("field", )
+
+    def __init__(self, pf_fn, field, obj_type = None,
+                 decimals = None):
+        super(AllFieldValuesTest, self).__init__(pf_fn)
+        self.obj_type = obj_type
+        self.field = field
+        self.decimals = decimals
+
+    def run(self):
+        obj = self.create_obj(self.pf, self.obj_type)
+        return obj[self.field]
+
+    def compare(self, new_result, old_result):
+        err_msg = "All field values for %s not equal." % self.field
+        if self.decimals is None:
+            assert_equal(new_result, old_result, 
+                         err_msg=err_msg, verbose=True)
+        else:
+            assert_rel_equal(new_result, old_result, self.decimals,
+                             err_msg=err_msg, verbose=True)
+            
 class ProjectionValuesTest(AnswerTestingTest):
     _type_name = "ProjectionValues"
     _attrs = ("field", "axis", "weight_field")
 
     def __init__(self, pf_fn, axis, field, weight_field = None,
-                 obj_type = None):
+                 obj_type = None, decimals = None):
         super(ProjectionValuesTest, self).__init__(pf_fn)
         self.axis = axis
         self.field = field
         self.weight_field = field
         self.obj_type = obj_type
+        self.decimals = decimals
 
     def run(self):
         if self.obj_type is not None:
@@ -322,7 +359,14 @@
         for k in new_result:
             assert (k in old_result)
         for k in new_result:
-            assert_equal(new_result[k], old_result[k])
+            err_msg = "%s values of %s (%s weighted) projection (axis %s) not equal." % \
+              (k, self.field, self.weight_field, self.axis)
+            if self.decimals is None:
+                assert_equal(new_result[k], old_result[k],
+                             err_msg=err_msg)
+            else:
+                assert_rel_equal(new_result[k], old_result[k], 
+                                 self.decimals, err_msg=err_msg)
 
 class PixelizedProjectionValuesTest(AnswerTestingTest):
     _type_name = "PixelizedProjectionValues"
@@ -393,9 +437,13 @@
         return result
 
     def compare(self, new_result, old_result):
-        assert_equal(len(new_result), len(old_result))
+        assert_equal(len(new_result), len(old_result),
+                     err_msg="Number of outputs not equal.",
+                     verbose=True)
         for i in range(len(new_result)):
-            assert_equal(new_result[i], old_result[i])
+            assert_equal(new_result[i], old_result[i],
+                         err_msg="Output times not equal.",
+                         verbose=True)
         
 class GridHierarchyTest(AnswerTestingTest):
     _type_name = "GridHierarchy"



https://bitbucket.org/yt_analysis/yt-3.0/changeset/5e1ba28cc582/
changeset:   5e1ba28cc582
branch:      yt
user:        samskillman
date:        2012-11-12 19:57:09
summary:     --answer-compare=SKIP now works.  Local and cloud storage now seem to be working.
affected #:  1 file

diff -r 1555c64e1bd743b17a0d4a8d9f03411e90e14d8e -r 5e1ba28cc582d1f7dc9de6f0c73695722d1f32bd yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -86,10 +86,14 @@
 
         # We only either store or test.
         if options.store_local_results:
+            if options.compare_name is not None:
+                options.compare_name = "%s/%s" % \
+                        (os.path.realpath(options.output_dir), 
+                         options.compare_name)
             AnswerTestingTest.reference_storage = \
                 self.storage = \
-                    AnswerTestLocalStorage("%s/%s" % \
-                        (os.path.realpath(options.output_dir), options.compare_name), not options.store_results)
+                    AnswerTestLocalStorage(options.compare_name, 
+                                           not options.store_results)
         else:
             AnswerTestingTest.reference_storage = \
                 self.storage = AnswerTestCloudStorage(options.compare_name, not options.store_results)
@@ -109,9 +113,9 @@
         self.cache = {}
         self.read = read
     def dump(self, result_storage, result):
-        pass
+        raise NotImplementedError 
     def get(self, pf_name, default=None):
-        pass
+        raise NotImplementedError 
 
 class AnswerTestCloudStorage(AnswerTestStorage):
     def get(self, pf_name, default = None):
@@ -220,7 +224,8 @@
 
     def __call__(self):
         nv = self.run()
-        if self.reference_storage is not None and self.reference_storage.read:
+        if self.reference_storage.read and \
+           self.reference_storage.reference_name is not None:
             dd = self.reference_storage.get(self.storage_name)
             if dd is None: raise YTNoOldAnswer(self.storage_name)
             ov = dd[self.description]


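With SKIP mapping compare_name to None, __call__ now bypasses the comparison branch entirely. For the local path, a minimal sketch of the shelve-backed reader in isolation (the filename is made up; read=True is compare mode, mirroring `not options.store_results` above):

    from yt.utilities.answer_testing.framework import AnswerTestLocalStorage
    storage = AnswerTestLocalStorage("/tmp/answers/gold001", read=True)
    old = storage.get("my_pf", default=None)  # shelve lookup; default if absent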

https://bitbucket.org/yt_analysis/yt-3.0/changeset/a0b098682138/
changeset:   a0b098682138
branch:      yt
user:        MatthewTurk
date:        2012-11-08 20:33:20
summary:     Merging
affected #:  3 files

diff -r b9d9aca343f57f3708754a0043c0a4f4f3b72b35 -r a0b0986821382e3baf2a4199fca1425fbc166f0d yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -3887,10 +3887,21 @@
         self._get_list_of_grids()
         # We don't generate coordinates here.
         if field == None:
-            fields_to_get = self.fields[:]
+            fields = self.fields[:]
         else:
-            fields_to_get = ensure_list(field)
-        fields_to_get = [f for f in fields_to_get if f not in self.field_data]
+            fields = ensure_list(field)
+        fields_to_get = []
+        for field in fields:
+            if self.field_data.has_key(field): continue
+            if field not in self.hierarchy.field_list:
+                try:
+                    #print "Generating", field
+                    self._generate_field(field)
+                    continue
+                except NeedsOriginalGrid, ngt_exception:
+                    pass
+            fields_to_get.append(field)
+        if len(fields_to_get) == 0: return
         # Note that, thanks to some trickery, we have different dimensions
         # on the field than one might think from looking at the dx and the
         # L/R edges.


diff -r b9d9aca343f57f3708754a0043c0a4f4f3b72b35 -r a0b0986821382e3baf2a4199fca1425fbc166f0d yt/data_objects/tests/test_covering_grid.py
--- a/yt/data_objects/tests/test_covering_grid.py
+++ b/yt/data_objects/tests/test_covering_grid.py
@@ -25,3 +25,24 @@
                                       dn*di[1]+i:dn*(di[1]+dd[1])+i:dn,
                                       dn*di[2]+i:dn*(di[2]+dd[2])+i:dn]
                     yield assert_equal, f, g["Density"]
+
+def test_smoothed_covering_grid():
+    # We decompose in different ways
+    for level in [0, 1, 2]:
+        for nprocs in [1, 2, 4, 8]:
+            pf = fake_random_pf(16, nprocs = nprocs)
+            dn = pf.refine_by**level 
+            cg = pf.h.smoothed_covering_grid(level, [0.0, 0.0, 0.0],
+                    dn * pf.domain_dimensions)
+            assert_equal( cg["Ones"].max(), 1.0)
+            assert_equal( cg["Ones"].min(), 1.0)
+            assert_equal( cg["CellVolume"].sum(), pf.domain_width.prod())
+            for g in pf.h.grids:
+                if level != g.Level: continue
+                di = g.get_global_startindex()
+                dd = g.ActiveDimensions
+                for i in range(dn):
+                    f = cg["Density"][dn*di[0]+i:dn*(di[0]+dd[0])+i:dn,
+                                      dn*di[1]+i:dn*(di[1]+dd[1])+i:dn,
+                                      dn*di[2]+i:dn*(di[2]+dd[2])+i:dn]
+                    yield assert_equal, f, g["Density"]


diff -r b9d9aca343f57f3708754a0043c0a4f4f3b72b35 -r a0b0986821382e3baf2a4199fca1425fbc166f0d yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -164,6 +164,7 @@
 
 class AnswerTestingTest(object):
     reference_storage = None
+    result_storage = None
     prefix = ""
     def __init__(self, pf_fn):
         self.pf = data_dir_load(pf_fn)


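The new test doubles as a usage sketch for smoothed_covering_grid; the essential calls, using fake_random_pf from yt.testing:

    >>> pf = fake_random_pf(16, nprocs=4)
    >>> level = 2
    >>> dims = pf.refine_by**level * pf.domain_dimensions
    >>> cg = pf.h.smoothed_covering_grid(level, [0.0, 0.0, 0.0], dims)
    >>> cg["Ones"].min(), cg["Ones"].max()
    (1.0, 1.0)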

https://bitbucket.org/yt_analysis/yt-3.0/changeset/d9a5a2350dad/
changeset:   d9a5a2350dad
branch:      yt
user:        brittonsmith
date:        2012-11-12 20:34:58
summary:     Merged in MatthewTurk/yt (pull request #327)
affected #:  3 files

diff -r 5e1ba28cc582d1f7dc9de6f0c73695722d1f32bd -r d9a5a2350dad48594e24bb3ba3d1c013ce2e5edb yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -3887,10 +3887,21 @@
         self._get_list_of_grids()
         # We don't generate coordinates here.
         if field == None:
-            fields_to_get = self.fields[:]
+            fields = self.fields[:]
         else:
-            fields_to_get = ensure_list(field)
-        fields_to_get = [f for f in fields_to_get if f not in self.field_data]
+            fields = ensure_list(field)
+        fields_to_get = []
+        for field in fields:
+            if self.field_data.has_key(field): continue
+            if field not in self.hierarchy.field_list:
+                try:
+                    #print "Generating", field
+                    self._generate_field(field)
+                    continue
+                except NeedsOriginalGrid, ngt_exception:
+                    pass
+            fields_to_get.append(field)
+        if len(fields_to_get) == 0: return
         # Note that, thanks to some trickery, we have different dimensions
         # on the field than one might think from looking at the dx and the
         # L/R edges.


diff -r 5e1ba28cc582d1f7dc9de6f0c73695722d1f32bd -r d9a5a2350dad48594e24bb3ba3d1c013ce2e5edb yt/data_objects/tests/test_covering_grid.py
--- a/yt/data_objects/tests/test_covering_grid.py
+++ b/yt/data_objects/tests/test_covering_grid.py
@@ -25,3 +25,24 @@
                                       dn*di[1]+i:dn*(di[1]+dd[1])+i:dn,
                                       dn*di[2]+i:dn*(di[2]+dd[2])+i:dn]
                     yield assert_equal, f, g["Density"]
+
+def test_smoothed_covering_grid():
+    # We decompose in different ways
+    for level in [0, 1, 2]:
+        for nprocs in [1, 2, 4, 8]:
+            pf = fake_random_pf(16, nprocs = nprocs)
+            dn = pf.refine_by**level 
+            cg = pf.h.smoothed_covering_grid(level, [0.0, 0.0, 0.0],
+                    dn * pf.domain_dimensions)
+            assert_equal( cg["Ones"].max(), 1.0)
+            assert_equal( cg["Ones"].min(), 1.0)
+            assert_equal( cg["CellVolume"].sum(), pf.domain_width.prod())
+            for g in pf.h.grids:
+                if level != g.Level: continue
+                di = g.get_global_startindex()
+                dd = g.ActiveDimensions
+                for i in range(dn):
+                    f = cg["Density"][dn*di[0]+i:dn*(di[0]+dd[0])+i:dn,
+                                      dn*di[1]+i:dn*(di[1]+dd[1])+i:dn,
+                                      dn*di[2]+i:dn*(di[2]+dd[2])+i:dn]
+                    yield assert_equal, f, g["Density"]





https://bitbucket.org/yt_analysis/yt-3.0/changeset/2095692ab748/
changeset:   2095692ab748
branch:      yt
user:        brittonsmith
date:        2012-11-05 22:24:13
summary:     Enzo simulation time series only loads pfs that actually exist.
affected #:  1 file

diff -r 5b89c5ee1aff0971bde76a52410df526dcb6dde0 -r 2095692ab74850fe0c2bee9892487cb24e706f35 yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -241,9 +241,13 @@
             if my_initial_time == my_times[my_indices[0] - 1]: my_indices[0] -= 1
             my_outputs = my_all_outputs[my_indices[0]:my_indices[1]]
 
-        TimeSeriesData.__init__(self, outputs=[output['filename'] for output in my_outputs],
-                                parallel=parallel)
-        mylog.info("%d outputs loaded into time series." % len(my_outputs))
+        init_outputs = []
+        for output in my_outputs:
+            if os.path.exists(output['filename']):
+                init_outputs.append(output['filename'])
+            
+        TimeSeriesData.__init__(self, outputs=init_outputs, parallel=parallel)
+        mylog.info("%d outputs loaded into time series." % len(init_outputs))
 
     def _parse_parameter_file(self):
         """


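The filtering loop above is equivalent to a single comprehension, shown here only to make the intent plain:

    init_outputs = [output['filename'] for output in my_outputs
                    if os.path.exists(output['filename'])]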

https://bitbucket.org/yt_analysis/yt-3.0/changeset/2dffa1c760fd/
changeset:   2dffa1c760fd
branch:      yt
user:        brittonsmith
date:        2012-11-05 22:36:28
summary:     Having get_outputs_by_key return an empty list if there are no outputs
to search among.
affected #:  1 file

diff -r 2095692ab74850fe0c2bee9892487cb24e706f35 -r 2dffa1c760fd479a5edab07b59de74d0974b4364 yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -589,6 +589,8 @@
         if outputs is None:
             outputs = self.all_outputs
         my_outputs = []
+        if not outputs:
+            return my_outputs
         for value in values:
             outputs.sort(key=lambda obj:np.fabs(value - obj[key]))
             if (tolerance is None or np.abs(value - outputs[0][key]) <= tolerance) \


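For context, the guard protects the nearest-match search just below it, which repeatedly sorts the outputs by distance from each requested value; a toy illustration with made-up outputs:

    >>> outputs = [{"time": 0.0}, {"time": 1.0}, {"time": 2.5}]
    >>> outputs.sort(key=lambda obj: abs(1.2 - obj["time"]))
    >>> outputs[0]
    {'time': 1.0}

An empty list would make outputs[0] raise an IndexError, hence the early return.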

https://bitbucket.org/yt_analysis/yt-3.0/changeset/72fd8d5173d2/
changeset:   72fd8d5173d2
branch:      yt
user:        brittonsmith
date:        2012-11-12 20:39:18
summary:     Merged.
affected #:  23 files

diff -r 2dffa1c760fd479a5edab07b59de74d0974b4364 -r 72fd8d5173d2a76978acf3384a5605f8370001a6 .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5152,3 +5152,6 @@
 0000000000000000000000000000000000000000 svn.993
 fff7118f00e25731ccf37cba3082b8fcb73cf90e svn.371
 0000000000000000000000000000000000000000 svn.371
+f15825659f5af3ce64aaad30062aff3603cbfb66 hop callback
+0000000000000000000000000000000000000000 hop callback
+0000000000000000000000000000000000000000 hop callback


diff -r 2dffa1c760fd479a5edab07b59de74d0974b4364 -r 72fd8d5173d2a76978acf3384a5605f8370001a6 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -245,7 +245,7 @@
         echo "******************************************"
         echo "**                                      **"
         echo "**    Your CFLAGS is not empty.         **"
-        echo "**    This can beak h5py compilation.   **"
+        echo "**    This can break h5py compilation.  **"
         echo "**                                      **"
         echo "******************************************"
         echo "******************************************"
@@ -411,28 +411,31 @@
 
 # Now we dump all our SHA512 files out.
 
-echo '2c1933ab31246b4f4eba049d3288156e0a72f1730604e3ed7357849967cdd329e4647cf236c9442ecfb06d0aff03e6fc892a7ba2a5c1cf5c011b7ab9c619acec  Cython-0.16.tar.gz' > Cython-0.16.tar.gz.sha512
+echo 'eda1b8090e5e21e7e039ef4dd03de186a7b416df9d5a4e4422abeeb4d51383b9a6858e1ac4902d8e5010f661b295bbb2452c43c8738be668379b4eb4835d0f61  Cython-0.17.1.tar.gz' > Cython-0.17.1.tar.gz.sha512
 echo '44eea803870a66ff0bab08d13a8b3388b5578ebc1c807d1d9dca0a93e6371e91b15d02917a00b3b20dc67abb5a21dabaf9b6e9257a561f85eeff2147ac73b478  PyX-0.11.1.tar.gz' > PyX-0.11.1.tar.gz.sha512
 echo 'b981f8464575bb24c297631c87a3b9172312804a0fc14ce1fa7cb41ce2b0d2fd383cd1c816d6e10c36467d18bf9492d6faf557c81c04ff3b22debfa93f30ad0b  Python-2.7.3.tgz' > Python-2.7.3.tgz.sha512
 echo 'c017d3d59dd324ac91af0edc178c76b60a5f90fbb775cf843e39062f95bd846238f2c53705f8890ed3f34bc0e6e75671a73d13875eb0287d6201cb45f0a2d338  bzip2-1.0.5.tar.gz' > bzip2-1.0.5.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
 echo 'b519218f93946400326e9b656669269ecb3e5232b944e18fbc3eadc4fe2b56244d68aae56d6f69042b4c87c58c881ee2aaa279561ea0f0f48d5842155f4de9de  freetype-2.4.4.tar.gz' > freetype-2.4.4.tar.gz.sha512
-echo '1531789e0a77d4829796d18552a4de7aecae7e8b63763a7951a8091921995800740fe03e72a7dbd496a5590828131c5f046ddead695e5cba79343b8c205148d1  h5py-2.0.1.tar.gz' > h5py-2.0.1.tar.gz.sha512
-echo '9644896e4a84665ad22f87eb885cbd4a0c60a5c30085d5dd5dba5f3c148dbee626f0cb01e59a7995a84245448a3f1e9ba98687d3f10250e2ee763074ed8ddc0e  hdf5-1.8.7.tar.gz' > hdf5-1.8.7.tar.gz.sha512
+echo 'b3290c498191684781ca5286ab454eb1bd045e8d894f5b86fb86beb88f174e22ac3ab008fb02d6562051d9fa6a9593920cab433223f6d5473999913223b8e183  h5py-2.1.0.tar.gz' > h5py-2.1.0.tar.gz.sha512
+echo 'c68a425bacaa7441037910b9166f25b89e1387776a7749a5350793f89b1690350df5f018060c31d03686e7c3ed2aa848bd2b945c96350dc3b6322e087934783a  hdf5-1.8.9.tar.gz' > hdf5-1.8.9.tar.gz.sha512
 echo 'dbefad00fa34f4f21dca0f1e92e95bd55f1f4478fa0095dcf015b4d06f0c823ff11755cd777e507efaf1c9098b74af18f613ec9000e5c3a5cc1c7554fb5aefb8  libpng-1.5.12.tar.gz' > libpng-1.5.12.tar.gz.sha512
-echo 'f5ab95c29ef6958096970265a6079f0eb8c43a500924346c4a6c6eb89d9110eeeb6c34a53715e71240e82ded2b76a7b8d5a9b05a07baa000b2926718264ad8ff  matplotlib-1.1.0.tar.gz' > matplotlib-1.1.0.tar.gz.sha512
-echo 'ec7416729f99f5eef6700507e740552e771d6dd8863f757311538d7d67a0eecd3426381bd42a7ddbf0771bdde8bba5cb943f60031ae3567d6a3dcac738facda8  mercurial-2.2.2.tar.gz' > mercurial-2.2.2.tar.gz.sha512
+echo '5b1a0fb52dcb21ca5f0ab71c8a49550e1e8cf633552ec6598dc43f0b32c03422bf5af65b30118c163231ecdddfd40846909336f16da318959106076e80a3fad0  matplotlib-1.2.0.tar.gz' > matplotlib-1.2.0.tar.gz.sha512
+echo '52d1127de2208aaae693d16fef10ffc9b8663081bece83b7597d65706e9568af3b9e56bd211878774e1ebed92e21365ee9c49602a0ff5e48f89f12244d79c161  mercurial-2.4.tar.gz' > mercurial-2.4.tar.gz.sha512
 echo 'de3dd37f753614055dcfed910e9886e03688b8078492df3da94b1ec37be796030be93291cba09e8212fffd3e0a63b086902c3c25a996cf1439e15c5b16e014d9  numpy-1.6.1.tar.gz' > numpy-1.6.1.tar.gz.sha512
 echo '5ad681f99e75849a5ca6f439c7a19bb51abc73d121b50f4f8e4c0da42891950f30407f761a53f0fe51b370b1dbd4c4f5a480557cb2444c8c7c7d5412b328a474  sqlite-autoconf-3070500.tar.gz' > sqlite-autoconf-3070500.tar.gz.sha512
 echo 'edae735960279d92acf58e1f4095c6392a7c2059b8f1d2c46648fc608a0fb06b392db2d073f4973f5762c034ea66596e769b95b3d26ad963a086b9b2d09825f2  zlib-1.2.3.tar.bz2' > zlib-1.2.3.tar.bz2.sha512
-echo '42021737c93cea513116e6051cff9b803e3f25d6019c74370b42f4c91d1af73e94ac2b7ace85b7565fa3d45b79231079bd48a242531beeafa33c36d7139ce838  ipython-0.13.tar.gz' > ipython-0.13.tar.gz.sha512
+echo '05ac335727a2c3036f31a2506fdd2615aa436bfbe2f81799fe6c51bffe2591ad6a8427f3b25c34e7e709fb4e7607a0589dc7a22185c1f9b894e90de6711a88aa  ipython-0.13.1.tar.gz' > ipython-0.13.1.tar.gz.sha512
 echo 'fb3cf421b2dc48c31956b3e3ee4ab6ebc743deec3bf626c2238a1996c8c51be87260bd6aa662793a1f0c34dcda9b3146763777bb162dfad6fec4ca7acc403b2e  zeromq-2.2.0.tar.gz' > zeromq-2.2.0.tar.gz.sha512
 echo 'd761b492352841cdc125d9f0c99ee6d6c435812472ea234728b7f0fb4ad1048e1eec9b399df2081fbc926566f333f7780fedd0ce23255a6633fe5c60ed15a6af  pyzmq-2.1.11.tar.gz' > pyzmq-2.1.11.tar.gz.sha512
 echo '57fa5e57dfb98154a42d2d477f29401c2260ae7ad3a8128a4098b42ee3b35c54367b1a3254bc76b9b3b14b4aab7c3e1135858f68abc5636daedf2f01f9b8a3cf  tornado-2.2.tar.gz' > tornado-2.2.tar.gz.sha512
 echo '1332e3d5465ca249c357314cf15d2a4e5e83a941841021b8f6a17a107dce268a7a082838ade5e8db944ecde6bfb111211ab218aa414ee90aafbb81f1491b3b93  Forthon-0.8.10.tar.gz' > Forthon-0.8.10.tar.gz.sha512
+echo 'c13116c1f0547000cc565e15774687b9e884f8b74fb62a84e578408a868a84961704839065ae4f21b662e87f2aaedf6ea424ea58dfa9d3d73c06281f806d15dd  nose-1.2.1.tar.gz' > nose-1.2.1.tar.gz.sha512
+echo '73de2c99406a38f85273931597525cec4ebef55b93712adca3b0bfea8ca3fc99446e5d6495817e9ad55cf4d48feb7fb49734675c4cc8938db8d4a5225d30eca7  python-hglib-0.2.tar.gz' > python-hglib-0.2.tar.gz.sha512
+echo 'ffc602eb346717286b3d0a6770c60b03b578b3cf70ebd12f9e8b1c8c39cdb12ef219ddaa041d7929351a6b02dbb8caf1821b5452d95aae95034cbf4bc9904a7a  sympy-0.7.2.tar.gz' > sympy-0.7.2.tar.gz.sha512
 
 # Individual processes
-[ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.7.tar.gz
+[ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.9.tar.gz
 [ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.3.tar.bz2 
 [ $INST_BZLIB -eq 1 ] && get_ytproject bzip2-1.0.5.tar.gz
 [ $INST_PNG -eq 1 ] && get_ytproject libpng-1.5.12.tar.gz
@@ -444,14 +447,16 @@
 [ $INST_0MQ -eq 1 ] && get_ytproject tornado-2.2.tar.gz
 get_ytproject Python-2.7.3.tgz
 get_ytproject numpy-1.6.1.tar.gz
-get_ytproject matplotlib-1.1.0.tar.gz
-get_ytproject mercurial-2.2.2.tar.gz
-get_ytproject ipython-0.13.tar.gz
-get_ytproject h5py-2.0.1.tar.gz
-get_ytproject Cython-0.16.tar.gz
+get_ytproject matplotlib-1.2.0.tar.gz
+get_ytproject mercurial-2.4.tar.gz
+get_ytproject ipython-0.13.1.tar.gz
+get_ytproject h5py-2.1.0.tar.gz
+get_ytproject Cython-0.17.1.tar.gz
 get_ytproject reason-js-20120623.zip
 get_ytproject Forthon-0.8.10.tar.gz
-
+get_ytproject nose-1.2.1.tar.gz 
+get_ytproject python-hglib-0.2.tar.gz
+get_ytproject sympy-0.7.2.tar.gz
 if [ $INST_BZLIB -eq 1 ]
 then
     if [ ! -e bzip2-1.0.5/done ]
@@ -535,11 +540,11 @@
 
 if [ -z "$HDF5_DIR" ]
 then
-    if [ ! -e hdf5-1.8.7/done ]
+    if [ ! -e hdf5-1.8.9/done ]
     then
-        [ ! -e hdf5-1.8.7 ] && tar xfz hdf5-1.8.7.tar.gz
+        [ ! -e hdf5-1.8.9 ] && tar xfz hdf5-1.8.9.tar.gz
         echo "Installing HDF5"
-        cd hdf5-1.8.7
+        cd hdf5-1.8.9
         ( ./configure --prefix=${DEST_DIR}/ --enable-shared 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -587,7 +592,7 @@
 if [ $INST_HG -eq 1 ]
 then
     echo "Installing Mercurial."
-    do_setup_py mercurial-2.2.2
+    do_setup_py mercurial-2.4
     export HG_EXEC=${DEST_DIR}/bin/hg
 else
     # We assume that hg can be found in the path.
@@ -655,10 +660,10 @@
     echo "Setting CFLAGS ${CFLAGS}"
 fi
 # Now we set up the basedir for matplotlib:
-mkdir -p ${DEST_DIR}/src/matplotlib-1.1.0
-echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.1.0/setup.cfg
-echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.1.0/setup.cfg
-do_setup_py matplotlib-1.1.0
+mkdir -p ${DEST_DIR}/src/matplotlib-1.2.0
+echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.2.0/setup.cfg
+echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.2.0/setup.cfg
+do_setup_py matplotlib-1.2.0
 if [ -n "${OLD_LDFLAGS}" ]
 then
     export LDFLAG=${OLD_LDFLAGS}
@@ -685,10 +690,13 @@
     do_setup_py tornado-2.2
 fi
 
-do_setup_py ipython-0.13
-do_setup_py h5py-2.0.1
-do_setup_py Cython-0.16
+do_setup_py ipython-0.13.1
+do_setup_py h5py-2.1.0
+do_setup_py Cython-0.17.1
 do_setup_py Forthon-0.8.10
+do_setup_py nose-1.2.1
+do_setup_py python-hglib-0.2
+do_setup_py sympy-0.7.2
 [ $INST_PYX -eq 1 ] && do_setup_py PyX-0.11.1
 
 echo "Doing yt update, wiping local changes and updating to branch ${BRANCH}"


diff -r 2dffa1c760fd479a5edab07b59de74d0974b4364 -r 72fd8d5173d2a76978acf3384a5605f8370001a6 yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -84,6 +84,7 @@
     ValidateSpatial, \
     ValidateGridType, \
     add_field, \
+    add_grad, \
     derived_field
 
 from particle_trajectories import \


diff -r 2dffa1c760fd479a5edab07b59de74d0974b4364 -r 72fd8d5173d2a76978acf3384a5605f8370001a6 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -3887,10 +3887,21 @@
         self._get_list_of_grids()
         # We don't generate coordinates here.
         if field == None:
-            fields_to_get = self.fields[:]
+            fields = self.fields[:]
         else:
-            fields_to_get = ensure_list(field)
-        fields_to_get = [f for f in fields_to_get if f not in self.field_data]
+            fields = ensure_list(field)
+        fields_to_get = []
+        for field in fields:
+            if self.field_data.has_key(field): continue
+            if field not in self.hierarchy.field_list:
+                try:
+                    #print "Generating", field
+                    self._generate_field(field)
+                    continue
+                except NeedsOriginalGrid, ngt_exception:
+                    pass
+            fields_to_get.append(field)
+        if len(fields_to_get) == 0: return
         # Note that, thanks to some trickery, we have different dimensions
         # on the field than one might think from looking at the dx and the
         # L/R edges.


diff -r 2dffa1c760fd479a5edab07b59de74d0974b4364 -r 72fd8d5173d2a76978acf3384a5605f8370001a6 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -58,6 +58,66 @@
                 return function
             return create_function
         self[name] = DerivedField(name, function, **kwargs)
+        
+    def add_grad(self, field, **kwargs):
+        """
+        Creates the partial derivative of a given field. This function will
+        autogenerate the names of the gradient fields.
+
+        """
+        sl = slice(2,None,None)
+        sr = slice(None,-2,None)
+        
+        def _gradx(f, data):
+            grad = data[field][sl,1:-1,1:-1] - data[field][sr,1:-1,1:-1]
+            grad /= 2.0*data["dx"].flat[0]
+            g = np.zeros(data[field].shape, dtype='float64')
+            g[1:-1,1:-1,1:-1] = grad
+            return g
+            
+        def _grady(f, data):
+            grad = data[field][1:-1,sl,1:-1] - data[field][1:-1,sr,1:-1]
+            grad /= 2.0*data["dy"].flat[0]
+            g = np.zeros(data[field].shape, dtype='float64')
+            g[1:-1,1:-1,1:-1] = grad
+            return g
+            
+        def _gradz(f, data):
+            grad = data[field][1:-1,1:-1,sl] - data[field][1:-1,1:-1,sr]
+            grad /= 2.0*data["dz"].flat[0]
+            g = np.zeros(data[field].shape, dtype='float64')
+            g[1:-1,1:-1,1:-1] = grad
+            return g
+        
+        d_kwargs = kwargs.copy()
+        if "display_name" in kwargs: del d_kwargs["display_name"]
+        
+        for ax in "xyz":
+            if "display_name" in kwargs:
+                disp_name = r"%s\_%s" % (kwargs["display_name"], ax)
+            else:
+                disp_name = r"\partial %s/\partial %s" % (field, ax)
+            name = "Grad_%s_%s" % (field, ax)
+            self[name] = DerivedField(name, function=eval('_grad%s' % ax),
+                         take_log=False, validators=[ValidateSpatial(1,[field])],
+                         display_name = disp_name, **d_kwargs)
+        
+        def _grad(f, data) :
+            a = np.power(data["Grad_%s_x" % field],2)
+            b = np.power(data["Grad_%s_y" % field],2)
+            c = np.power(data["Grad_%s_z" % field],2)
+            norm = np.sqrt(a+b+c)
+            return norm
+
+        if "display_name" in kwargs:
+            disp_name = kwargs["display_name"]
+        else:
+            disp_name = r"\Vert\nabla %s\Vert" % (field)   
+        name = "Grad_%s" % field           
+        self[name] = DerivedField(name, function=_grad, take_log=False,
+                                  display_name = disp_name, **d_kwargs)
+        mylog.info("Added new fields: Grad_%s_x, Grad_%s_y, Grad_%s_z, Grad_%s" \
+                   % (field, field, field, field))
 
     def has_key(self, key):
         # This gets used a lot
@@ -96,6 +156,7 @@
 
 FieldInfo = FieldInfoContainer()
 add_field = FieldInfo.add_field
+add_grad = FieldInfo.add_grad
 
 def derived_field(**kwargs):
     def inner_decorator(function):

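A minimal usage sketch for the new helper (re-exported via yt.mods later in this changeset; the field and dataset names are arbitrary):

    >>> from yt.mods import *
    >>> add_grad("Density")
    >>> # registers Grad_Density_x/_y/_z (centered differences needing one
    >>> # ghost zone) plus the magnitude field Grad_Density
    >>> pf = load("RedshiftOutput0005")  # hypothetical dataset
    >>> dd = pf.h.all_data()
    >>> g = dd["Grad_Density"]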

diff -r 2dffa1c760fd479a5edab07b59de74d0974b4364 -r 72fd8d5173d2a76978acf3384a5605f8370001a6 yt/data_objects/hierarchy.py
--- a/yt/data_objects/hierarchy.py
+++ b/yt/data_objects/hierarchy.py
@@ -382,17 +382,19 @@
         """
         Prints out (stdout) relevant information about the simulation
         """
-        header = "%3s\t%6s\t%14s" % ("level","# grids", "# cells")
+        header = "%3s\t%6s\t%14s\t%14s" % ("level","# grids", "# cells",
+                                           "# cells^3")
         print header
         print "%s" % (len(header.expandtabs())*"-")
         for level in xrange(MAXLEVEL):
             if (self.level_stats['numgrids'][level]) == 0:
                 break
-            print "% 3i\t% 6i\t% 14i" % \
+            print "% 3i\t% 6i\t% 14i\t% 14i" % \
                   (level, self.level_stats['numgrids'][level],
-                   self.level_stats['numcells'][level])
+                   self.level_stats['numcells'][level],
+                   self.level_stats['numcells'][level]**(1./3))
             dx = self.select_grids(level)[0].dds[0]
-        print "-" * 28
+        print "-" * 46
         print "   \t% 6i\t% 14i" % (self.level_stats['numgrids'].sum(), self.level_stats['numcells'].sum())
         print "\n"
         try:

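Note that despite the "# cells^3" header, the new column prints numcells**(1./3), i.e. the edge length of an equivalent cube, which serves as an effective linear resolution per level:

    >>> round(2744 ** (1./3))   # a level with 2744 = 14**3 cells
    14.0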

diff -r 2dffa1c760fd479a5edab07b59de74d0974b4364 -r 72fd8d5173d2a76978acf3384a5605f8370001a6 yt/data_objects/tests/test_covering_grid.py
--- a/yt/data_objects/tests/test_covering_grid.py
+++ b/yt/data_objects/tests/test_covering_grid.py
@@ -25,3 +25,24 @@
                                       dn*di[1]+i:dn*(di[1]+dd[1])+i:dn,
                                       dn*di[2]+i:dn*(di[2]+dd[2])+i:dn]
                     yield assert_equal, f, g["Density"]
+
+def test_smoothed_covering_grid():
+    # We decompose in different ways
+    for level in [0, 1, 2]:
+        for nprocs in [1, 2, 4, 8]:
+            pf = fake_random_pf(16, nprocs = nprocs)
+            dn = pf.refine_by**level 
+            cg = pf.h.smoothed_covering_grid(level, [0.0, 0.0, 0.0],
+                    dn * pf.domain_dimensions)
+            assert_equal( cg["Ones"].max(), 1.0)
+            assert_equal( cg["Ones"].min(), 1.0)
+            assert_equal( cg["CellVolume"].sum(), pf.domain_width.prod())
+            for g in pf.h.grids:
+                if level != g.Level: continue
+                di = g.get_global_startindex()
+                dd = g.ActiveDimensions
+                for i in range(dn):
+                    f = cg["Density"][dn*di[0]+i:dn*(di[0]+dd[0])+i:dn,
+                                      dn*di[1]+i:dn*(di[1]+dd[1])+i:dn,
+                                      dn*di[2]+i:dn*(di[2]+dd[2])+i:dn]
+                    yield assert_equal, f, g["Density"]


diff -r 2dffa1c760fd479a5edab07b59de74d0974b4364 -r 72fd8d5173d2a76978acf3384a5605f8370001a6 yt/frontends/enzo/answer_testing_support.py
--- /dev/null
+++ b/yt/frontends/enzo/answer_testing_support.py
@@ -0,0 +1,112 @@
+"""
+Answer Testing support for Enzo.
+
+Author: Britton Smith <brittonsmith at gmail.com>
+Affiliation: Michigan State University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Britton Smith.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.testing import *
+from yt.config import ytcfg
+from yt.mods import *
+
+from yt.utilities.answer_testing.framework import \
+     AnswerTestingTest, \
+     can_run_pf, \
+     FieldValuesTest, \
+     GridHierarchyTest, \
+     GridValuesTest, \
+     ProjectionValuesTest, \
+     ParentageRelationshipsTest, \
+     temp_cwd
+
+def requires_outputlog(path = ".", prefix = ""):
+    def ffalse(func):
+        return lambda: None
+    def ftrue(func):
+        @wraps(func)
+        def fyielder(*args, **kwargs):
+            with temp_cwd(path):
+                for t in func(*args, **kwargs):
+                    if isinstance(t, AnswerTestingTest):
+                        t.prefix = prefix
+                    yield t
+        return fyielder
+    if os.path.exists("OutputLog"):
+        return ftrue
+    with temp_cwd(path):
+        if os.path.exists("OutputLog"):
+            return ftrue
+    return ffalse
+     
+def standard_small_simulation(pf_fn, fields):
+    if not can_run_pf(pf_fn): return
+    dso = [None]
+    yield GridHierarchyTest(pf_fn)
+    yield ParentageRelationshipsTest(pf_fn)
+    for field in fields:
+        yield GridValuesTest(pf_fn, field)
+        for axis in [0, 1, 2]:
+            for ds in dso:
+                for weight_field in [None, "Density"]:
+                    yield ProjectionValuesTest(
+                        pf_fn, axis, field, weight_field,
+                        ds)
+                yield FieldValuesTest(
+                        pf_fn, field, ds)
+                    
+class ShockTubeTest(object):
+    def __init__(self, data_file, solution_file, fields, 
+                 left_edges, right_edges, rtol, atol):
+        self.solution_file = solution_file
+        self.data_file = data_file
+        self.fields = fields
+        self.left_edges = left_edges
+        self.right_edges = right_edges
+        self.rtol = rtol
+        self.atol = atol
+
+    def __call__(self):
+        # Read in the pf
+        pf = load(self.data_file)  
+        exact = self.get_analytical_solution() 
+
+        ad = pf.h.all_data()
+        position = ad['x']
+        for k in self.fields:
+            field = ad[k]
+            for xmin, xmax in zip(self.left_edges, self.right_edges):
+                mask = (position >= xmin)*(position <= xmax)
+                exact_field = np.interp(position[mask], exact['pos'], exact[k]) 
+                # yield test vs analytical solution 
+                yield assert_allclose, field[mask], exact_field, \
+                    self.rtol, self.atol
+
+    def get_analytical_solution(self):
+        # Reads in from file 
+        pos, dens, vel, pres, inte = \
+                np.loadtxt(self.solution_file, unpack=True)
+        exact = {}
+        exact['pos'] = pos
+        exact['Density'] = dens
+        exact['x-velocity'] = vel
+        exact['Pressure'] = pres
+        exact['ThermalEnergy'] = inte
+        return exact

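A minimal sketch of driving the new shock-tube test (paths, edges, and tolerances are hypothetical):

    def test_sod_shock_tube():   # nose picks this up as a test generator
        test = ShockTubeTest("DD0001/data0001", "analytical_solution.txt",
                             ["Density", "x-velocity", "Pressure", "ThermalEnergy"],
                             left_edges=[0.0], right_edges=[1.0],
                             rtol=1.0e-6, atol=1.0e-7)
        for check in test():     # (assert_allclose, actual, expected, rtol, atol)
            yield check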

diff -r 2dffa1c760fd479a5edab07b59de74d0974b4364 -r 72fd8d5173d2a76978acf3384a5605f8370001a6 yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -482,7 +482,7 @@
         self.parameters['TopGridRank'] = 3
         self.parameters['DomainLeftEdge'] = np.zeros(self.parameters['TopGridRank'])
         self.parameters['DomainRightEdge'] = np.ones(self.parameters['TopGridRank'])
-        self.parameters['Refineby'] = 2 # technically not the enzo default
+        self.parameters['RefineBy'] = 2 # technically not the enzo default
         self.parameters['StopCycle'] = 100000
         self.parameters['dtDataDump'] = 0.
         self.parameters['CycleSkipDataDump'] = 0.


diff -r 2dffa1c760fd479a5edab07b59de74d0974b4364 -r 72fd8d5173d2a76978acf3384a5605f8370001a6 yt/frontends/stream/api.py
--- a/yt/frontends/stream/api.py
+++ b/yt/frontends/stream/api.py
@@ -30,7 +30,8 @@
       StreamStaticOutput, \
       StreamHandler, \
       load_uniform_grid, \
-      load_amr_grids
+      load_amr_grids, \
+      refine_amr
 
 from .fields import \
       KnownStreamFields, \


diff -r 2dffa1c760fd479a5edab07b59de74d0974b4364 -r 72fd8d5173d2a76978acf3384a5605f8370001a6 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -44,6 +44,8 @@
     decompose_array, get_psize
 from yt.utilities.definitions import \
     mpc_conversion, sec_conversion
+from yt.utilities.flagging_methods import \
+    FlaggingGrid
 
 from .fields import \
     StreamFieldInfo, \
@@ -495,3 +497,68 @@
     for unit in mpc_conversion.keys():
         spf.units[unit] = mpc_conversion[unit] * box_in_mpc
     return spf
+
+def refine_amr(base_pf, refinement_criteria, fluid_operators, max_level,
+               callback = None):
+    r"""Given a base parameter file, repeatedly apply refinement criteria and
+    fluid operators until a maximum level is reached.
+
+    Parameters
+    ----------
+    base_pf : StaticOutput
+        This is any static output.  It can also be a stream static output, for
+        instance as returned by load_uniform_data.
+    refinement_critera : list of :class:`~yt.utilities.flagging_methods.FlaggingMethod`
+        These criteria will be applied in sequence to identify cells that need
+        to be refined.
+    fluid_operators : list of :class:`~yt.utilities.initial_conditions.FluidOperator`
+        These fluid operators will be applied in sequence to all resulting
+        grids.
+    max_level : int
+        The maximum level to which the data will be refined
+    callback : function, optional
+        A function that will be called at the beginning of each refinement
+        cycle, with the current parameter file.
+
+    Examples
+    --------
+    >>> domain_dims = (32, 32, 32)
+    >>> data = np.zeros(domain_dims) + 0.25
+    >>> fo = [ic.CoredSphere(0.05, 0.3, [0.7,0.4,0.75], {"Density": (0.25, 100.0)})]
+    >>> rc = [fm.flagging_method_registry["overdensity"](8.0)]
+    >>> ug = load_uniform_grid({'Density': data}, domain_dims, 1.0)
+    >>> pf = refine_amr(ug, rc, fo, 5)
+    """
+    last_gc = base_pf.h.num_grids
+    cur_gc = -1
+    pf = base_pf    
+    while pf.h.max_level < max_level and last_gc != cur_gc:
+        mylog.info("Refining another level.  Current max level: %s",
+                  pf.h.max_level)
+        last_gc = pf.h.grids.size
+        for m in fluid_operators: m.apply(pf)
+        if callback is not None: callback(pf)
+        grid_data = []
+        for g in pf.h.grids:
+            gd = dict( left_edge = g.LeftEdge,
+                       right_edge = g.RightEdge,
+                       level = g.Level,
+                       dimensions = g.ActiveDimensions )
+            for field in pf.h.field_list:
+                gd[field] = g[field]
+            grid_data.append(gd)
+            if g.Level < pf.h.max_level: continue
+            fg = FlaggingGrid(g, refinement_criteria)
+            nsg = fg.find_subgrids()
+            for sg in nsg:
+                LE = sg.left_index * g.dds
+                dims = sg.dimensions * pf.refine_by
+                grid = pf.h.smoothed_covering_grid(g.Level + 1, LE, dims)
+                gd = dict(left_edge = LE, right_edge = grid.right_edge,
+                          level = g.Level + 1, dimensions = dims)
+                for field in pf.h.field_list:
+                    gd[field] = grid[field]
+                grid_data.append(gd)
+        pf = load_amr_grids(grid_data, pf.domain_dimensions, 1.0)
+        cur_gc = pf.h.num_grids
+    return pf

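The Examples block above leaves the `ic` and `fm` aliases undefined; filled in (module paths taken from the Sphinx roles in the Parameters section), a runnable version would look like:

    import numpy as np
    import yt.utilities.initial_conditions as ic
    import yt.utilities.flagging_methods as fm
    from yt.frontends.stream.api import load_uniform_grid, refine_amr

    domain_dims = (32, 32, 32)
    data = np.zeros(domain_dims) + 0.25
    fo = [ic.CoredSphere(0.05, 0.3, [0.7, 0.4, 0.75], {"Density": (0.25, 100.0)})]
    rc = [fm.flagging_method_registry["overdensity"](8.0)]
    ug = load_uniform_grid({'Density': data}, domain_dims, 1.0)
    pf = refine_amr(ug, rc, fo, max_level=5)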

diff -r 2dffa1c760fd479a5edab07b59de74d0974b4364 -r 72fd8d5173d2a76978acf3384a5605f8370001a6 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -58,7 +58,7 @@
 from yt.data_objects.api import \
     BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \
     data_object_registry, \
-    derived_field, add_field, FieldInfo, \
+    derived_field, add_field, add_grad, FieldInfo, \
     ValidateParameter, ValidateDataField, ValidateProperty, \
     ValidateSpatial, ValidateGridType, \
     TimeSeriesData, AnalysisTask, analysis_task, \


diff -r 2dffa1c760fd479a5edab07b59de74d0974b4364 -r 72fd8d5173d2a76978acf3384a5605f8370001a6 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -26,9 +26,10 @@
 from yt.funcs import *
 from numpy.testing import assert_array_equal, assert_almost_equal, \
     assert_approx_equal, assert_array_almost_equal, assert_equal, \
-    assert_array_less, assert_string_equal, assert_array_almost_equal_nulp
+    assert_array_less, assert_string_equal, assert_array_almost_equal_nulp,\
+    assert_allclose
 
-def assert_rel_equal(a1, a2, decimals):
+def assert_rel_equal(a1, a2, decimals, err_msg=''):
     # We have nan checks in here because occasionally we have fields that get
     # weighted without non-zero weights.  I'm looking at you, particle fields!
     if isinstance(a1, np.ndarray):
@@ -38,7 +39,7 @@
         a2[np.isnan(a2)] = 1.0
     elif np.isnan(a1) and np.isnan(a2):
         return True
-    return assert_almost_equal(a1/a2, 1.0, decimals)
+    return assert_almost_equal(a1/a2, 1.0, decimals, err_msg=err_msg)
 
 def amrspace(extent, levels=7, cells=8):
     """Creates two numpy arrays representing the left and right bounds of 


diff -r 2dffa1c760fd479a5edab07b59de74d0974b4364 -r 72fd8d5173d2a76978acf3384a5605f8370001a6 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -34,7 +34,9 @@
 from yt.testing import *
 from yt.config import ytcfg
 from yt.mods import *
+from yt.data_objects.static_output import StaticOutput
 import cPickle
+import shelve
 
 from yt.utilities.logger import disable_stream_logging
 from yt.utilities.command_line import get_yt_version
@@ -60,6 +62,8 @@
             help="The name we'll call this set of tests")
         parser.add_option("--answer-store", dest="store_results",
             default=False, action="store_true")
+        parser.add_option("--local-store", dest="store_local_results",
+            default=False, action="store_true", help="Store/Load local results?")
 
     def configure(self, options, conf):
         super(AnswerTesting, self).configure(options, conf)
@@ -75,40 +79,47 @@
         ytcfg["yt","__withintesting"] = "True"
         AnswerTestingTest.result_storage = \
             self.result_storage = defaultdict(dict)
-        if options.compare_name is not None:
-            # Now we grab from our S3 store
-            if options.compare_name == "latest":
-                options.compare_name = _latest
+        if options.compare_name == "SKIP":
+            options.compare_name = None
+        elif options.compare_name == "latest":
+            options.compare_name = _latest
+
+        # We only either store or test.
+        if options.store_local_results:
+            if options.compare_name is not None:
+                options.compare_name = "%s/%s" % \
+                        (os.path.realpath(options.output_dir), 
+                         options.compare_name)
             AnswerTestingTest.reference_storage = \
-                AnswerTestOpener(options.compare_name)
-        self.answer_name = options.this_name
+                self.storage = \
+                    AnswerTestLocalStorage(options.compare_name, 
+                                           not options.store_results)
+        else:
+            AnswerTestingTest.reference_storage = \
+                self.storage = AnswerTestCloudStorage(options.compare_name, not options.store_results)
+
         self.store_results = options.store_results
+        self.store_local_results = options.store_local_results
         global run_big_data
         run_big_data = options.big_data
 
-    def finalize(self, result):
-        # This is where we dump our result storage up to Amazon, if we are able
-        # to.
+    def finalize(self, result=None):
         if self.store_results is False: return
-        import boto
-        from boto.s3.key import Key
-        c = boto.connect_s3()
-        bucket = c.get_bucket("yt-answer-tests")
-        for pf_name in self.result_storage:
-            rs = cPickle.dumps(self.result_storage[pf_name])
-            tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name)) 
-            if tk is not None: tk.delete()
-            k = Key(bucket)
-            k.key = "%s_%s" % (self.answer_name, pf_name)
-            k.set_contents_from_string(rs)
-            k.set_acl("public-read")
+        self.storage.dump(self.result_storage)        
 
-class AnswerTestOpener(object):
-    def __init__(self, reference_name):
+class AnswerTestStorage(object):
+    def __init__(self, reference_name, read=True):
         self.reference_name = reference_name
         self.cache = {}
+        self.read = read
+    def dump(self, result_storage):
+        raise NotImplementedError 
+    def get(self, pf_name, default=None):
+        raise NotImplementedError 
 
+class AnswerTestCloudStorage(AnswerTestStorage):
     def get(self, pf_name, default = None):
+        if not self.read: return default
         if pf_name in self.cache: return self.cache[pf_name]
         url = _url_path % (self.reference_name, pf_name)
         try:
@@ -123,6 +134,47 @@
         self.cache[pf_name] = rv
         return rv
 
+    def dump(self, result_storage):
+        if self.read: return
+        # This is where we dump our result storage up to Amazon, if we are able
+        # to.
+        import boto
+        from boto.s3.key import Key
+        c = boto.connect_s3()
+        bucket = c.get_bucket("yt-answer-tests")
+        for pf_name in result_storage:
+            rs = cPickle.dumps(result_storage[pf_name])
+            tk = bucket.get_key("%s_%s" % (self.reference_name, pf_name)) 
+            if tk is not None: tk.delete()
+            k = Key(bucket)
+            k.key = "%s_%s" % (self.reference_name, pf_name)
+            k.set_contents_from_string(rs)
+            k.set_acl("public-read")
+
+class AnswerTestLocalStorage(AnswerTestStorage):
+    def dump(self, result_storage):
+        if self.read: return 
+        # Store data using shelve
+        ds = shelve.open(self.reference_name, protocol=-1)
+        for pf_name in result_storage:
+            answer_name = "%s" % pf_name
+            if answer_name in ds:
+                mylog.info("Overwriting %s", answer_name)
+            ds[answer_name] = result_storage[pf_name]
+        ds.close()
+
+    def get(self, pf_name, default=None):
+        if not self.read: return default
+        # Read data using shelve
+        answer_name = "%s" % pf_name
+        ds = shelve.open(self.reference_name, protocol=-1)
+        try:
+            result = ds[answer_name]
+        except KeyError:
+            result = default
+        ds.close()
+        return result
+
 @contextlib.contextmanager
 def temp_cwd(cwd):
     oldcwd = os.getcwd()
@@ -132,6 +184,10 @@
 
 def can_run_pf(pf_fn):
     path = ytcfg.get("yt", "test_data_dir")
+    if not os.path.isdir(path):
+        return False
+    if isinstance(pf_fn, StaticOutput):
+        return AnswerTestingTest.result_storage is not None
     with temp_cwd(path):
         try:
             load(pf_fn)
@@ -141,26 +197,48 @@
 
 def data_dir_load(pf_fn):
     path = ytcfg.get("yt", "test_data_dir")
+    if not os.path.isdir(path):
+        return False
+    if isinstance(pf_fn, StaticOutput): return pf_fn
     with temp_cwd(path):
         pf = load(pf_fn)
         pf.h
         return pf
 
+def sim_dir_load(sim_fn, path = None, sim_type = "Enzo",
+                 find_outputs=False):
+    if path is None and not os.path.exists(sim_fn):
+        raise IOError
+    if os.path.exists(sim_fn) or not path:
+        path = "."
+    with temp_cwd(path):
+        return simulation(sim_fn, sim_type,
+                          find_outputs=find_outputs)
+
 class AnswerTestingTest(object):
     reference_storage = None
+    result_storage = None
+    prefix = ""
     def __init__(self, pf_fn):
         self.pf = data_dir_load(pf_fn)
 
     def __call__(self):
         nv = self.run()
-        if self.reference_storage is not None:
-            dd = self.reference_storage.get(str(self.pf))
-            if dd is None: raise YTNoOldAnswer()
+        if self.reference_storage.read and \
+           self.reference_storage.reference_name is not None:
+            dd = self.reference_storage.get(self.storage_name)
+            if dd is None: raise YTNoOldAnswer(self.storage_name)
             ov = dd[self.description]
             self.compare(nv, ov)
         else:
             ov = None
-        self.result_storage[str(self.pf)][self.description] = nv
+        self.result_storage[self.storage_name][self.description] = nv
+
+    @property
+    def storage_name(self):
+        if self.prefix != "":
+            return "%s_%s" % (self.prefix, self.pf)
+        return str(self.pf)
 
     def compare(self, new_result, old_result):
         raise RuntimeError
@@ -211,10 +289,12 @@
     _type_name = "FieldValues"
     _attrs = ("field", )
 
-    def __init__(self, pf_fn, field, obj_type = None):
+    def __init__(self, pf_fn, field, obj_type = None,
+                 decimals = None):
         super(FieldValuesTest, self).__init__(pf_fn)
         self.obj_type = obj_type
         self.field = field
+        self.decimals = decimals
 
     def run(self):
         obj = self.create_obj(self.pf, self.obj_type)
@@ -224,19 +304,50 @@
         return np.array([avg, mi, ma])
 
     def compare(self, new_result, old_result):
-        assert_equal(new_result, old_result)
+        err_msg = "Field values for %s not equal." % self.field
+        if self.decimals is None:
+            assert_equal(new_result, old_result, 
+                         err_msg=err_msg, verbose=True)
+        else:
+            assert_rel_equal(new_result, old_result, self.decimals,
+                             err_msg=err_msg, verbose=True)
 
+class AllFieldValuesTest(AnswerTestingTest):
+    _type_name = "AllFieldValues"
+    _attrs = ("field", )
+
+    def __init__(self, pf_fn, field, obj_type = None,
+                 decimals = None):
+        super(AllFieldValuesTest, self).__init__(pf_fn)
+        self.obj_type = obj_type
+        self.field = field
+        self.decimals = decimals
+
+    def run(self):
+        obj = self.create_obj(self.pf, self.obj_type)
+        return obj[self.field]
+
+    def compare(self, new_result, old_result):
+        err_msg = "All field values for %s not equal." % self.field
+        if self.decimals is None:
+            assert_equal(new_result, old_result, 
+                         err_msg=err_msg, verbose=True)
+        else:
+            assert_rel_equal(new_result, old_result, self.decimals,
+                             err_msg=err_msg, verbose=True)
+            
 class ProjectionValuesTest(AnswerTestingTest):
     _type_name = "ProjectionValues"
     _attrs = ("field", "axis", "weight_field")
 
     def __init__(self, pf_fn, axis, field, weight_field = None,
-                 obj_type = None):
+                 obj_type = None, decimals = None):
         super(ProjectionValuesTest, self).__init__(pf_fn)
         self.axis = axis
         self.field = field
         self.weight_field = weight_field
         self.obj_type = obj_type
+        self.decimals = decimals
 
     def run(self):
         if self.obj_type is not None:
@@ -253,7 +364,14 @@
         for k in new_result:
             assert (k in old_result)
         for k in new_result:
-            assert_equal(new_result[k], old_result[k])
+            err_msg = "%s values of %s (%s weighted) projection (axis %s) not equal." % \
+              (k, self.field, self.weight_field, self.axis)
+            if self.decimals is None:
+                assert_equal(new_result[k], old_result[k],
+                             err_msg=err_msg)
+            else:
+                assert_rel_equal(new_result[k], old_result[k], 
+                                 self.decimals, err_msg=err_msg)
 
 class PixelizedProjectionValuesTest(AnswerTestingTest):
     _type_name = "PixelizedProjectionValues"
@@ -312,6 +430,26 @@
         for k in new_result:
             assert_equal(new_result[k], old_result[k])
 
+class VerifySimulationSameTest(AnswerTestingTest):
+    _type_name = "VerifySimulationSame"
+    _attrs = ()
+
+    def __init__(self, simulation_obj):
+        self.pf = simulation_obj
+
+    def run(self):
+        result = [ds.current_time for ds in self.pf]
+        return result
+
+    def compare(self, new_result, old_result):
+        assert_equal(len(new_result), len(old_result),
+                     err_msg="Number of outputs not equal.",
+                     verbose=True)
+        for i in range(len(new_result)):
+            assert_equal(new_result[i], old_result[i],
+                         err_msg="Output times not equal.",
+                         verbose=True)
+        
 class GridHierarchyTest(AnswerTestingTest):
     _type_name = "GridHierarchy"
     _attrs = ()

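The --local-store path above swaps the S3 round trip for a stdlib shelve file.
A minimal sketch of the contract AnswerTestLocalStorage implements (the
function names below are illustrative, and results stands in for the
per-parameter-file answer dictionaries the harness collects):

    import shelve

    def dump_local(reference_name, results):
        # One entry per parameter file, keyed by its name, as in dump() above.
        ds = shelve.open(reference_name, protocol=-1)
        for pf_name, answers in results.items():
            ds[str(pf_name)] = answers
        ds.close()

    def get_local(reference_name, pf_name, default=None):
        # Mirror of AnswerTestLocalStorage.get(): missing keys return default.
        ds = shelve.open(reference_name, protocol=-1)
        try:
            return ds.get(str(pf_name), default)
        finally:
            ds.close()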

diff -r 2dffa1c760fd479a5edab07b59de74d0974b4364 -r 72fd8d5173d2a76978acf3384a5605f8370001a6 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -25,6 +25,7 @@
 
 # We don't need to import 'exceptions'
 #import exceptions
+import os.path
 
 class YTException(Exception):
     def __init__(self, pf = None):
@@ -172,3 +173,12 @@
 
     def __str__(self):
         return "Must have A>=B>=C"
+
+class EnzoTestOutputFileNonExistent(YTException):
+    def __init__(self, filename):
+        self.filename = filename
+        self.testname = os.path.basename(os.path.dirname(filename))
+
+    def __str__(self):
+        return "Enzo test output file (OutputLog) not generated for: " + \
+            "'%s'" % (self.testname) + ".\nTest did not complete."


diff -r 2dffa1c760fd479a5edab07b59de74d0974b4364 -r 72fd8d5173d2a76978acf3384a5605f8370001a6 yt/utilities/flagging_methods.py
--- a/yt/utilities/flagging_methods.py
+++ b/yt/utilities/flagging_methods.py
@@ -24,15 +24,10 @@
 """
 
 import numpy as np # For modern purposes
+from yt.utilities.lib import grow_flagging_field
 
 flagging_method_registry = {}
 
-def flag_cells(grid, methods):
-    flagged = np.zeros(grid.ActiveDimensions, dtype="bool")
-    for method in methods:
-        flagged |= method(grid)
-    return flagged
-
 class FlaggingMethod(object):
     _skip_add = False
     class __metaclass__(type):
@@ -46,6 +41,144 @@
     def __init__(self, over_density):
         self.over_density = over_density
 
-    def __call__(self, pf, grid):
-        rho = grid["Density"] / (pf.refine_by**grid.Level)
+    def __call__(self, grid):
+        rho = grid["Density"] / (grid.pf.refine_by**grid.Level)
         return (rho > self.over_density)
+
+class FlaggingGrid(object):
+    def __init__(self, grid, methods):
+        self.grid = grid
+        flagged = np.zeros(grid.ActiveDimensions, dtype="bool")
+        for method in methods:
+            flagged |= method(self.grid)
+        self.flagged = grow_flagging_field(flagged)
+        self.subgrids = []
+        self.left_index = grid.get_global_startindex()
+        self.dimensions = grid.ActiveDimensions.copy()
+
+    def find_subgrids(self):
+        if not np.any(self.flagged): return []
+        psg = ProtoSubgrid(self.flagged, self.left_index, self.dimensions)
+        sgl = [psg]
+        index = 0
+        while index < len(sgl):
+            psg = sgl[index]
+            psg.shrink()
+            if psg.dimensions.prod() == 0:
+                sgl[index] = None
+                index += 1
+                continue
+            while not psg.acceptable:
+                new_psgs = []
+                for i, dim in enumerate(np.argsort(psg.dimensions)[::-1]):
+                    new_psgs = psg.find_by_zero_signature(dim)
+                    if len(new_psgs) > 1:
+                        break
+                if len(new_psgs) <= 1:
+                    new_psgs = psg.find_by_second_derivative()
+                psg = new_psgs[0]
+                sgl[index] = psg 
+                sgl.extend(new_psgs[1:])
+                psg.shrink()
+            index += 1
+        return sgl
+
+
+# Much or most of this is directly translated from Enzo
+class ProtoSubgrid(object):
+
+    def __init__(self, flagged_base, left_index, dimensions, offset = (0,0,0)):
+        self.left_index = left_index.copy()
+        self.dimensions = dimensions.copy()
+        self.flagged = flagged_base[offset[0]:offset[0]+dimensions[0],
+                                    offset[1]:offset[1]+dimensions[1],
+                                    offset[2]:offset[2]+dimensions[2]]
+        self.compute_signatures()
+
+    def compute_signatures(self):
+        self.sigs = []
+        for dim in range(3):
+            d1 = (dim + 1) % 3
+            d2 = (dim == 0)
+            self.sigs.append(self.flagged.sum(axis=d1).sum(axis=d2))
+
+    @property
+    def acceptable(self):
+        return float(self.flagged.sum()) / self.flagged.size > 0.2
+
+    def shrink(self):
+        new_ind = []
+        for dim in range(3):
+            sig = self.sigs[dim]
+            new_start = 0
+            while sig[new_start] == 0:
+                new_start += 1
+            new_end = sig.size 
+            while sig[new_end - 1] == 0:
+                new_end -= 1
+            self.dimensions[dim] = new_end - new_start
+            self.left_index[dim] += new_start
+            new_ind.append((new_start, new_end))
+        self.flagged = self.flagged[new_ind[0][0]:new_ind[0][1],
+                                    new_ind[1][0]:new_ind[1][1],
+                                    new_ind[2][0]:new_ind[2][1]]
+        self.compute_signatures()
+
+    def find_by_zero_signature(self, dim):
+        sig = self.sigs[dim]
+        grid_ends = np.zeros((sig.size, 2))
+        ng = 0
+        i = 0
+        while i < sig.size:
+            if sig[i] != 0:
+                grid_ends[ng, 0] = i
+                while i < sig.size and sig[i] != 0:
+                    i += 1
+                grid_ends[ng, 1] = i - 1
+                ng += 1
+            i += 1
+        new_grids = []
+        for si, ei in grid_ends[:ng,:]:
+            li = self.left_index.copy()
+            dims = self.dimensions.copy()
+            li[dim] += si
+            dims[dim] = ei - si
+            offset = [0,0,0]
+            offset[dim] = si
+            new_grids.append(ProtoSubgrid(self.flagged, li, dims, offset))
+        return new_grids
+
+    def find_by_second_derivative(self):
+        max_strength = 0
+        max_axis = -1
+        max_ind = -1
+        for dim in range(3):
+            sig = self.sigs[dim]
+            sd = sig[:-2] - 2.0*sig[1:-1] + sig[2:]
+            grid_ends = np.zeros((sig.size, 2))
+            ng = 0
+            center = int((self.flagged.shape[dim] - 1) / 2)
+            strength = zero_strength = zero_cross = 0
+            for i in range(1, sig.size-2):
+                # Note that sd is offset by one
+                if sd[i-1] * sd[i] < 0:
+                    strength = np.abs(sd[i-1] - sd[i])
+                    if strength > zero_strength or \
+                       (strength == zero_strength and np.abs(center - i) < np.abs(zero_cross -i )):
+                        zero_strength = strength
+                        zero_cross = i
+            if zero_strength > max_strength:
+                max_axis = dim
+                max_ind = zero_cross
+        dims = self.dimensions.copy()
+        li = self.left_index.copy()
+        dims[max_axis] = max_ind
+        psg1 = ProtoSubgrid(self.flagged, li, dims)
+        li[max_axis] += max_ind
+        dims[max_axis] = self.dimensions[max_axis] - max_ind
+        offset = np.zeros(3, dtype="int64")
+        offset[max_axis] = max_ind
+        psg2 = ProtoSubgrid(self.flagged, li, dims, offset)
+        return [psg1, psg2]
+
+    def __str__(self):
+        return "LI: (%s) DIMS: (%s)" % (self.left_index, self.dimensions)

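The signatures driving the splitting above are just the flagged-cell counts
collapsed onto each axis; find_by_zero_signature cuts wherever a signature
drops to zero, in the style of Berger-Rigoutsos clustering. A toy
illustration of the signature idea (the array and values are invented for
the example):

    import numpy as np

    flagged = np.zeros((8, 8, 8), dtype=bool)
    flagged[1:3, 1:3, 1:3] = True   # one clump near a corner
    flagged[5:7, 5:7, 5:7] = True   # a second, well-separated clump

    # 1D signature along x: collapse y, then the remaining transverse axis.
    sig_x = flagged.sum(axis=1).sum(axis=1)
    # Zeros in the signature mark planes where the box can be cut in two:
    print list(sig_x)               # [0, 4, 4, 0, 0, 4, 4, 0]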

diff -r 2dffa1c760fd479a5edab07b59de74d0974b4364 -r 72fd8d5173d2a76978acf3384a5605f8370001a6 yt/utilities/initial_conditions.py
--- /dev/null
+++ b/yt/utilities/initial_conditions.py
@@ -0,0 +1,80 @@
+"""
+Painting zones in a grid
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+
+class FluidOperator(object):
+    def apply(self, pf):
+        for g in pf.h.grids: self(g)
+
+class TopHatSphere(FluidOperator):
+    def __init__(self, radius, center, fields):
+        self.radius = radius
+        self.center = center
+        self.fields = fields
+        
+    def __call__(self, grid, sub_select = None):
+        r = np.zeros(grid.ActiveDimensions, dtype="float64")
+        for i, ax in enumerate("xyz"):
+            np.add(r, (grid[ax] - self.center[i])**2.0, r)
+        np.sqrt(r, r)
+        ind = (r <= self.radius)
+        if sub_select is not None:
+            ind &= sub_select
+        for field, val in self.fields.iteritems():
+            grid[field][ind] = val
+
+class CoredSphere(FluidOperator):
+    def __init__(self, core_radius, radius, center, fields):
+        self.radius = radius
+        self.center = center
+        self.fields = fields
+        self.core_radius = core_radius
+
+    def __call__(self, grid, sub_select = None):
+        r = np.zeros(grid.ActiveDimensions, dtype="float64")
+        r2 = self.radius**2
+        cr2 = self.core_radius**2
+        for i, ax in enumerate("xyz"):
+            np.add(r, (grid[ax] - self.center[i])**2.0, r)
+        np.maximum(r, cr2, r)
+        ind = (r <= r2)
+        if sub_select is not None:
+            ind &= sub_select
+        for field, (outer_val, inner_val) in self.fields.iteritems():
+            val = ((r[ind] - cr2) / (r2 - cr2))**0.5 * (outer_val - inner_val)
+            grid[field][ind] = val + inner_val
+
+class RandomFluctuation(FluidOperator):
+    def __init__(self, fields):
+        self.fields = fields
+
+    def __call__(self, grid, sub_select = None):
+        if sub_select is None:
+            sub_select = Ellipsis
+        for field, mag in self.fields.iteritems():
+            vals = grid[field][sub_select]
+            rc = 1.0 + (np.random.random(vals.shape) - 0.5) * mag
+            grid[field][sub_select] *= rc

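A plausible use of the new operators, assuming pf is an already-loaded
parameter file (the numbers are invented for illustration):

    # Paint a uniform overdense sphere, then perturb it with 5% random noise.
    sphere = TopHatSphere(0.1, [0.5, 0.5, 0.5], {"Density": 100.0})
    noise = RandomFluctuation({"Density": 0.05})
    sphere.apply(pf)    # FluidOperator.apply calls the operator on every grid
    noise.apply(pf)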

diff -r 2dffa1c760fd479a5edab07b59de74d0974b4364 -r 72fd8d5173d2a76978acf3384a5605f8370001a6 yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -291,3 +291,25 @@
     # Return out unique values
     return best_dim, split, less_ids.view("bool"), greater_ids.view("bool")
 
+
+def grow_flagging_field(oofield):
+    cdef np.ndarray[np.uint8_t, ndim=3] ofield = oofield.astype("uint8")
+    cdef np.ndarray[np.uint8_t, ndim=3] nfield
+    nfield = np.zeros_like(ofield)
+    cdef int i, j, k, ni, nj, nk
+    cdef int oi, oj, ok
+    for ni in range(ofield.shape[0]):
+        for nj in range(ofield.shape[1]):
+            for nk in range(ofield.shape[2]):
+                for oi in range(3):
+                    i = ni + (oi - 1)
+                    if i < 0 or i >= ofield.shape[0]: continue
+                    for oj in range(3):
+                        j = nj + (oj - 1)
+                        if j < 0 or j >= ofield.shape[1]: continue
+                        for ok in range(3):
+                            k = nk + (ok - 1)
+                            if k < 0 or k >= ofield.shape[2]: continue
+                            if ofield[i, j, k] == 1:
+                                nfield[ni, nj, nk] = 1
+    return nfield.astype("bool")

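grow_flagging_field amounts to a 3x3x3 binary dilation: an output cell is set
if any of the 27 cells in its neighborhood was flagged. Assuming SciPy is
available, the result should match ndimage (a cross-check sketch, not part of
the commit):

    import numpy as np
    from scipy import ndimage

    flagged = np.zeros((8, 8, 8), dtype=bool)
    flagged[4, 4, 4] = True
    grown = ndimage.binary_dilation(flagged,
                structure=np.ones((3, 3, 3), dtype=bool))
    # grown flags the full 3x3x3 block centered on (4, 4, 4),
    # the same answer grow_flagging_field(flagged) returns.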

diff -r 2dffa1c760fd479a5edab07b59de74d0974b4364 -r 72fd8d5173d2a76978acf3384a5605f8370001a6 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -279,12 +279,12 @@
     ranks = None
     available_ranks = None
     tasks = None
-    workgroups = []
     def __init__(self):
         self.comm = communication_system.communicators[-1]
         self.size = self.comm.size
         self.ranks = range(self.size)
         self.available_ranks = range(self.size)
+        self.workgroups = []
     
     def add_workgroup(self, size=None, ranks=None, name=None):
         if size is None:
@@ -296,7 +296,7 @@
             ranks = [self.available_ranks.pop(0) for i in range(size)]
         # Default name to the workgroup number.
         if name is None: 
-            name = string(len(workgroups))
+            name = str(len(self.workgroups))
         group = self.comm.comm.Get_group().Incl(ranks)
         new_comm = self.comm.comm.Create(group)
         if self.comm.rank in ranks:
@@ -304,16 +304,20 @@
         self.workgroups.append(Workgroup(len(ranks), ranks, new_comm, name))
     
     def free_workgroup(self, workgroup):
+        # If you want to actually delete the workgroup you will need to
+        # pop it out of the self.workgroups list so you don't have references
+        # that are left dangling, e.g. see free_all() below.
         for i in workgroup.ranks:
             if self.comm.rank == i:
                 communication_system.communicators.pop()
             self.available_ranks.append(i) 
-        del workgroup
         self.available_ranks.sort()
 
     def free_all(self):
         for wg in self.workgroups:
             self.free_workgroup(wg)
+        for i in range(len(self.workgroups)):
+            self.workgroups.pop(0)
 
     @classmethod
     def from_sizes(cls, sizes):

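The workgroups change above matters because the list used to live on the
class, so every ProcessorPool shared (and leaked into) the same list; it is
now per-instance, and free_all() actually empties it. A rough usage sketch
(sizes and names are illustrative):

    pool = ProcessorPool()
    pool.add_workgroup(size=2, name="render")
    pool.add_workgroup(size=2, name="analysis")
    # ... per-workgroup parallel work ...
    pool.free_all()   # returns all ranks and clears pool.workgroups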

diff -r 2dffa1c760fd479a5edab07b59de74d0974b4364 -r 72fd8d5173d2a76978acf3384a5605f8370001a6 yt/utilities/tests/test_flagging_methods.py
--- a/yt/utilities/tests/test_flagging_methods.py
+++ b/yt/utilities/tests/test_flagging_methods.py
@@ -9,4 +9,4 @@
 def test_over_density():
     od_flag = flagging_method_registry["overdensity"](0.75) 
     criterion = (pf.h.grids[0]["Density"] > 0.75)
-    assert( np.all( od_flag(pf, pf.h.grids[0]) == criterion) )
+    assert( np.all( od_flag(pf.h.grids[0]) == criterion) )


diff -r 2dffa1c760fd479a5edab07b59de74d0974b4364 -r 72fd8d5173d2a76978acf3384a5605f8370001a6 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -407,8 +407,9 @@
                                self.data_source[item],
                                self.buff_size[0], self.buff_size[1],
                                self.bounds).transpose()
-        self[item] = buff
-        return buff
+        ia = ImageArray(buff, info=self._get_info(item))
+        self[item] = ia
+        return ia 
 
 
 class OffAxisProjectionFixedResolutionBuffer(FixedResolutionBuffer):
@@ -430,7 +431,8 @@
                                    weight=ds.weight_field, volume=ds.volume,
                                    no_ghost=ds.no_ghost, interpolated=ds.interpolated,
                                    north_vector=ds.north_vector)
-        self[item] = buff.swapaxes(0,1)
-        return buff
+        ia = ImageArray(buff.swapaxes(0,1), info=self._get_info(item))
+        self[item] = ia
+        return ia 
 
 

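With this change, indexing a fixed-resolution buffer returns an ImageArray
carrying plot metadata rather than a bare ndarray; the test_projection.py
additions later in this digest assert on exactly those info keys. A sketch of
what that enables (proj stands for any projection data object; the field and
bounds are illustrative):

    frb = proj.to_frb((1.0, 'unitary'), 64)
    im = frb["Density"]                        # ImageArray, not plain np.ndarray
    print im.info['axis'], im.info['units']    # metadata attached via _get_info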

diff -r 2dffa1c760fd479a5edab07b59de74d0974b4364 -r 72fd8d5173d2a76978acf3384a5605f8370001a6 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -633,6 +633,9 @@
         y0, y1 = plot.ylim
         xx0, xx1 = plot._axes.get_xlim()
         yy0, yy1 = plot._axes.get_ylim()
+
+        extent = [xx0,xx1,yy0,yy1]
+
         plot._axes.hold(True)
 
         px_index = x_dict[plot.data.axis]
@@ -662,7 +665,7 @@
                              (x0, x1, y0, y1), 0).transpose()
             buff = np.maximum(temp, buff)
         self.rv = plot._axes.contour(buff, len(self.clumps)+1,
-                                     **self.plot_args)
+                                     extent=extent,**self.plot_args)
         plot._axes.hold(False)
 
 class ArrowCallback(PlotCallback):
@@ -809,6 +812,7 @@
 
     def __call__(self, plot):
         from matplotlib.patches import Circle
+        num = len(self.hop_output[:self.max_number])
         for halo in self.hop_output[:self.max_number]:
             size = halo.get_size()
             if size < self.min_size or size > self.max_size: continue
@@ -825,18 +829,19 @@
             (xi, yi) = (x_dict[plot.data.axis], y_dict[plot.data.axis])
 
             (center_x,center_y) = self.convert_to_plot(plot,(center[xi], center[yi]))
-            cir = Circle((center_x, center_y), radius, fill=False)
+            color = np.ones(3) * (0.4 * (num - halo.id)/ num) + 0.6
+            cir = Circle((center_x, center_y), radius, fill=False, color=color)
             plot._axes.add_patch(cir)
             if self.annotate:
                 if self.print_halo_size:
-                    plot._axes.text(center_x, center_y, "%s" % size,
-                    fontsize=self.font_size)
+                    plot._axes.text(center_x+radius, center_y+radius, "%s" % size,
+                    fontsize=self.font_size, color=color)
                 elif self.print_halo_mass:
-                    plot._axes.text(center_x, center_y, "%s" % halo.total_mass(),
-                    fontsize=self.font_size)
+                    plot._axes.text(center_x+radius, center_y+radius, "%s" % halo.total_mass(),
+                    fontsize=self.font_size, color=color)
                 else:
-                    plot._axes.text(center_x, center_y, "%s" % halo.id,
-                    fontsize=self.font_size)
+                    plot._axes.text(center_x+radius, center_y+radius, "%s" % halo.id,
+                    fontsize=self.font_size, color=color)
 
 class HopParticleCallback(PlotCallback):
     _type_name = "hop_particles"
@@ -1170,10 +1175,11 @@
           }
     _bbox_dict = {'boxstyle': 'square,pad=0.6', 'fc': 'white', 'ec': 'black', 'alpha': 1.0}
 
-    def __init__(self, x, y, units=None, format="{time:.3G} {units}", normalized = False, 
-                 bbox_dict = None, **kwargs):
+    def __init__(self, x, y, units=None, format="{time:.3G} {units}", normalized=False, 
+                 bbox_dict=None, **kwargs):
         """ 
-        annotate_timestamp(x, y, units=None, format="{time:.3G} {units}", **kwargs)
+        annotate_timestamp(x, y, units=None, format="{time:.3G} {units}",
+                           normalized=False, bbox_dict=None, **kwargs)
 
         Adds the current time to the plot at point given by *x* and *y*.  If *units* 
         is given ('s', 'ms', 'ns', etc), it will convert the time to this basis.  If 


diff -r 2dffa1c760fd479a5edab07b59de74d0974b4364 -r 72fd8d5173d2a76978acf3384a5605f8370001a6 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -27,10 +27,20 @@
 import base64
 import matplotlib.figure
 from matplotlib.mathtext import MathTextParser
+from distutils import version
+import matplotlib
+
+# Some magic for dealing with pyparsing being included or not
+# included in matplotlib (not in gentoo, yes in everything else)
+# Also accounting for the fact that in 1.2.0, pyparsing got renamed.
 try:
-    from matplotlib.pyparsing import ParseFatalException
+    if version.LooseVersion(matplotlib.__version__) < version.LooseVersion("1.2.0"):
+        from matplotlib.pyparsing import ParseFatalException
+    else:
+        from matplotlib.pyparsing_py2 import ParseFatalException
 except ImportError:
     from pyparsing import ParseFatalException
+
 import cStringIO
 import types
 import __builtin__
@@ -213,7 +223,7 @@
     _vector_info = None
     _frb = None
     def __init__(self, data_source, bounds, buff_size=(800,800), antialias=True, 
-                 periodic=True, origin='center-window', oblique=False):
+                 periodic=True, origin='center-window', oblique=False, fontsize=15):
         r"""
         PlotWindow(data_source, bounds, buff_size=(800,800), antialias = True)
         
@@ -246,7 +256,7 @@
         if not hasattr(self, "pf"):
             self.pf = data_source.pf
             ts = self._initialize_dataset(self.pf) 
-        self.ts = ts
+            self.ts = ts
         self._initfinished = False
         self.center = None
         self.plots = {}
@@ -257,6 +267,7 @@
         self.antialias = True
         self.set_window(bounds) # this automatically updates the data and plot
         self.origin = origin
+        self.fontsize = fontsize
         if self.data_source.center is not None and oblique == False:
             center = [self.data_source.center[i] for i in range(len(self.data_source.center)) if i != self.data_source.axis]
             self.set_center(center)
@@ -802,8 +813,10 @@
                 labels = [r'$\rm{Image\/x'+axes_unit_label+'}$',
                           r'$\rm{Image\/y'+axes_unit_label+'}$']
 
-            self.plots[f].axes.set_xlabel(labels[0])
-            self.plots[f].axes.set_ylabel(labels[1])
+            self.plots[f].axes.set_xlabel(labels[0],fontsize=self.fontsize)
+            self.plots[f].axes.set_ylabel(labels[1],fontsize=self.fontsize)
+
+            self.plots[f].axes.tick_params(labelsize=self.fontsize)
 
             field_name = self.data_source.pf.field_info[f].display_name
 
@@ -827,7 +840,9 @@
                     raise YTCannotParseUnitDisplayName(f, md['units'],str(err))
                 label = field_name+r'$\/\/('+md['units']+r')$'
 
-            self.plots[f].cb.set_label(label)
+            self.plots[f].cb.set_label(label,fontsize=self.fontsize)
+
+            self.plots[f].cb.ax.tick_params(labelsize=self.fontsize)
 
             self.run_callbacks(f)
 
@@ -954,7 +969,7 @@
     _frb_generator = FixedResolutionBuffer
 
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
-                 origin='center-window'):
+                 origin='center-window', fontsize=15):
         r"""Creates a slice plot from a parameter file
         
         Given a pf object, an axis to slice along, and a field name
@@ -1010,6 +1025,8 @@
              to the bottom-left hand corner of the simulation domain, 'center-domain',
              corresponding the center of the simulation domain, or 'center-window' for 
              the center of the plot window.
+        fontsize : integer
+             The size of the fonts for the axis, colorbar, and tick labels.
              
         Examples
         --------
@@ -1036,7 +1053,7 @@
     _frb_generator = FixedResolutionBuffer
 
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
-                 weight_field=None, max_level=None, origin='center-window'):
+                 weight_field=None, max_level=None, origin='center-window', fontsize=15):
         r"""Creates a projection plot from a parameter file
         
         Given a pf object, an axis to project along, and a field name
@@ -1096,6 +1113,8 @@
             The name of the weighting field.  Set to None for no weight.
         max_level: int
             The maximum level to project to.
+        fontsize : integer
+             The size of the fonts for the axis, colorbar, and tick labels.
         
         Examples
         --------
@@ -1121,7 +1140,7 @@
     _frb_generator = ObliqueFixedResolutionBuffer
 
     def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
-                 axes_unit=None, north_vector=None):
+                 axes_unit=None, north_vector=None, fontsize=15):
         r"""Creates an off axis slice plot from a parameter file
 
         Given a pf object, a normal vector defining a slicing plane, and
@@ -1158,7 +1177,8 @@
             A vector defining the 'up' direction in the plot.  This
             option sets the orientation of the slicing plane.  If not
             set, an arbitrary grid-aligned north-vector is chosen.
-
+        fontsize : integer
+             The size of the fonts for the axis, colorbar, and tick labels.
         """
         (bounds,center_rot) = GetOffAxisBoundsAndCenter(normal,center,width,pf)
         cutting = pf.h.cutting(normal,center,fields=fields,north_vector=north_vector)
@@ -1197,7 +1217,7 @@
     def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
                  depth=(1,'unitary'), axes_unit=None, weight_field=None, 
                  max_level=None, north_vector=None, volume=None, no_ghost=False, 
-                 le=None, re=None, interpolated=False):
+                 le=None, re=None, interpolated=False, fontsize=15):
         r"""Creates an off axis projection plot from a parameter file
 
         Given a pf object, a normal vector to project along, and


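The fontsize keyword threaded through these constructors can be passed
straight from user code; a minimal sketch, assuming these are the
SlicePlot-family classes and that the dataset path is loadable (the path here
is illustrative):

    from yt.mods import *
    pf = load("DD0010/moving7_0010")
    p = SlicePlot(pf, "x", "Density", fontsize=20)
    p.save()    # axis labels, colorbar label, and tick labels render at 20pt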

https://bitbucket.org/yt_analysis/yt-3.0/changeset/232e4623dc1e/
changeset:   232e4623dc1e
branch:      yt
user:        samskillman
date:        2012-11-12 23:27:06
summary:     Don't test field values for particles.  Also assert_rel_equal does not have a verbose option.
affected #:  2 files

diff -r d9a5a2350dad48594e24bb3ba3d1c013ce2e5edb -r 232e4623dc1e1e91ca6442706eed185584f3ed00 yt/frontends/enzo/answer_testing_support.py
--- a/yt/frontends/enzo/answer_testing_support.py
+++ b/yt/frontends/enzo/answer_testing_support.py
@@ -63,6 +63,7 @@
     yield ParentageRelationshipsTest(pf_fn)
     for field in fields:
         yield GridValuesTest(pf_fn, field)
+        if 'particle' in field: continue
         for axis in [0, 1, 2]:
             for ds in dso:
                 for weight_field in [None, "Density"]:


diff -r d9a5a2350dad48594e24bb3ba3d1c013ce2e5edb -r 232e4623dc1e1e91ca6442706eed185584f3ed00 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -310,7 +310,7 @@
                          err_msg=err_msg, verbose=True)
         else:
             assert_rel_equal(new_result, old_result, self.decimals,
-                             err_msg=err_msg, verbose=True)
+                             err_msg=err_msg)
 
 class AllFieldValuesTest(AnswerTestingTest):
     _type_name = "AllFieldValues"
@@ -334,7 +334,7 @@
                          err_msg=err_msg, verbose=True)
         else:
             assert_rel_equal(new_result, old_result, self.decimals,
-                             err_msg=err_msg, verbose=True)
+                             err_msg=err_msg)
             
 class ProjectionValuesTest(AnswerTestingTest):
     _type_name = "ProjectionValues"



https://bitbucket.org/yt_analysis/yt-3.0/changeset/d84dc871dfc9/
changeset:   d84dc871dfc9
branch:      yt
user:        samskillman
date:        2012-11-12 23:27:38
summary:     Merging
affected #:  1 file

diff -r 232e4623dc1e1e91ca6442706eed185584f3ed00 -r d84dc871dfc9657a2b2f461cb7a6e0bad7508386 yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -241,9 +241,13 @@
             if my_initial_time == my_times[my_indices[0] - 1]: my_indices[0] -= 1
             my_outputs = my_all_outputs[my_indices[0]:my_indices[1]]
 
-        TimeSeriesData.__init__(self, outputs=[output['filename'] for output in my_outputs],
-                                parallel=parallel)
-        mylog.info("%d outputs loaded into time series." % len(my_outputs))
+        init_outputs = []
+        for output in my_outputs:
+            if os.path.exists(output['filename']):
+                init_outputs.append(output['filename'])
+            
+        TimeSeriesData.__init__(self, outputs=init_outputs, parallel=parallel)
+        mylog.info("%d outputs loaded into time series." % len(init_outputs))
 
     def _parse_parameter_file(self):
         """
@@ -585,6 +589,8 @@
         if outputs is None:
             outputs = self.all_outputs
         my_outputs = []
+        if not outputs:
+            return my_outputs
         for value in values:
             outputs.sort(key=lambda obj:np.fabs(value - obj[key]))
             if (tolerance is None or np.abs(value - outputs[0][key]) <= tolerance) \



https://bitbucket.org/yt_analysis/yt-3.0/changeset/3b328da941be/
changeset:   3b328da941be
branch:      yt
user:        MatthewTurk
date:        2012-11-13 15:14:52
summary:     Adding a comoving units conversion to FLASH
affected #:  1 file

diff -r d84dc871dfc9657a2b2f461cb7a6e0bad7508386 -r 3b328da941be8637737b0405e57d62227b77be6d yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -274,6 +274,7 @@
             self.conversion_factors["Time"] = 1.0
         for unit in mpc_conversion.keys():
             self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
+            self.units[unit+"cm"] = self.units[unit]
             self.units[unit] /= (1.0+self.current_redshift)
             
     def _setup_cgs_units(self):


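The effect of the FLASH change is that every length unit gains a comoving
counterpart with a "cm" suffix, larger than the proper conversion by a factor
of (1 + z). A quick sanity check, assuming pf is a cosmological FLASH dataset:

    z = pf.current_redshift
    # After this change, e.g. 'Mpccm' (comoving) vs. 'Mpc' (proper):
    assert abs(pf['Mpccm'] / pf['Mpc'] - (1.0 + z)) < 1e-10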

https://bitbucket.org/yt_analysis/yt-3.0/changeset/7f0e191c6869/
changeset:   7f0e191c6869
branch:      yt
user:        jsoishi
date:        2012-11-13 16:22:18
summary:     added stefan-boltzmann constant in cgs
affected #:  1 file

diff -r 189d2eace2e91e9ac7aeae9593fe465b50b6cf92 -r 7f0e191c686919d4932ec0c178f41521f75a90c2 yt/utilities/physical_constants.py
--- a/yt/utilities/physical_constants.py
+++ b/yt/utilities/physical_constants.py
@@ -20,6 +20,7 @@
 boltzmann_constant_cgs = 1.3806504e-16  # erg K^-1
 gravitational_constant_cgs  = 6.67428e-8  # cm^3 g^-1 s^-2
 planck_constant_cgs   = 6.62606896e-27  # erg s
+stefan_boltzmann_constant_cgs = 5.670373e-5 # erg cm^-2 s^-1 K^-4
 rho_crit_now = 1.8788e-29  # g times h^2 (critical mass for closure, Cosmology)
 
 # Misc. Approximations


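With the new constant, blackbody flux estimates are one-liners via
F = sigma * T**4. For example (the solar effective temperature is used purely
for illustration):

    from yt.utilities.physical_constants import stefan_boltzmann_constant_cgs

    T = 5778.0                                   # K
    flux = stefan_boltzmann_constant_cgs * T**4  # erg cm^-2 s^-1, ~6.3e10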

https://bitbucket.org/yt_analysis/yt-3.0/changeset/8cee6c380933/
changeset:   8cee6c380933
branch:      yt
user:        jsoishi
date:        2012-11-13 16:22:44
summary:     merged.
affected #:  30 files

diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5152,3 +5152,6 @@
 0000000000000000000000000000000000000000 svn.993
 fff7118f00e25731ccf37cba3082b8fcb73cf90e svn.371
 0000000000000000000000000000000000000000 svn.371
+f15825659f5af3ce64aaad30062aff3603cbfb66 hop callback
+0000000000000000000000000000000000000000 hop callback
+0000000000000000000000000000000000000000 hop callback


diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -245,7 +245,7 @@
         echo "******************************************"
         echo "**                                      **"
         echo "**    Your CFLAGS is not empty.         **"
-        echo "**    This can beak h5py compilation.   **"
+        echo "**    This can break h5py compilation.  **"
         echo "**                                      **"
         echo "******************************************"
         echo "******************************************"
@@ -411,28 +411,31 @@
 
 # Now we dump all our SHA512 files out.
 
-echo '2c1933ab31246b4f4eba049d3288156e0a72f1730604e3ed7357849967cdd329e4647cf236c9442ecfb06d0aff03e6fc892a7ba2a5c1cf5c011b7ab9c619acec  Cython-0.16.tar.gz' > Cython-0.16.tar.gz.sha512
+echo 'eda1b8090e5e21e7e039ef4dd03de186a7b416df9d5a4e4422abeeb4d51383b9a6858e1ac4902d8e5010f661b295bbb2452c43c8738be668379b4eb4835d0f61  Cython-0.17.1.tar.gz' > Cython-0.17.1.tar.gz.sha512
 echo '44eea803870a66ff0bab08d13a8b3388b5578ebc1c807d1d9dca0a93e6371e91b15d02917a00b3b20dc67abb5a21dabaf9b6e9257a561f85eeff2147ac73b478  PyX-0.11.1.tar.gz' > PyX-0.11.1.tar.gz.sha512
 echo 'b981f8464575bb24c297631c87a3b9172312804a0fc14ce1fa7cb41ce2b0d2fd383cd1c816d6e10c36467d18bf9492d6faf557c81c04ff3b22debfa93f30ad0b  Python-2.7.3.tgz' > Python-2.7.3.tgz.sha512
 echo 'c017d3d59dd324ac91af0edc178c76b60a5f90fbb775cf843e39062f95bd846238f2c53705f8890ed3f34bc0e6e75671a73d13875eb0287d6201cb45f0a2d338  bzip2-1.0.5.tar.gz' > bzip2-1.0.5.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
 echo 'b519218f93946400326e9b656669269ecb3e5232b944e18fbc3eadc4fe2b56244d68aae56d6f69042b4c87c58c881ee2aaa279561ea0f0f48d5842155f4de9de  freetype-2.4.4.tar.gz' > freetype-2.4.4.tar.gz.sha512
-echo '1531789e0a77d4829796d18552a4de7aecae7e8b63763a7951a8091921995800740fe03e72a7dbd496a5590828131c5f046ddead695e5cba79343b8c205148d1  h5py-2.0.1.tar.gz' > h5py-2.0.1.tar.gz.sha512
-echo '9644896e4a84665ad22f87eb885cbd4a0c60a5c30085d5dd5dba5f3c148dbee626f0cb01e59a7995a84245448a3f1e9ba98687d3f10250e2ee763074ed8ddc0e  hdf5-1.8.7.tar.gz' > hdf5-1.8.7.tar.gz.sha512
+echo 'b3290c498191684781ca5286ab454eb1bd045e8d894f5b86fb86beb88f174e22ac3ab008fb02d6562051d9fa6a9593920cab433223f6d5473999913223b8e183  h5py-2.1.0.tar.gz' > h5py-2.1.0.tar.gz.sha512
+echo 'c68a425bacaa7441037910b9166f25b89e1387776a7749a5350793f89b1690350df5f018060c31d03686e7c3ed2aa848bd2b945c96350dc3b6322e087934783a  hdf5-1.8.9.tar.gz' > hdf5-1.8.9.tar.gz.sha512
 echo 'dbefad00fa34f4f21dca0f1e92e95bd55f1f4478fa0095dcf015b4d06f0c823ff11755cd777e507efaf1c9098b74af18f613ec9000e5c3a5cc1c7554fb5aefb8  libpng-1.5.12.tar.gz' > libpng-1.5.12.tar.gz.sha512
-echo 'f5ab95c29ef6958096970265a6079f0eb8c43a500924346c4a6c6eb89d9110eeeb6c34a53715e71240e82ded2b76a7b8d5a9b05a07baa000b2926718264ad8ff  matplotlib-1.1.0.tar.gz' > matplotlib-1.1.0.tar.gz.sha512
-echo 'ec7416729f99f5eef6700507e740552e771d6dd8863f757311538d7d67a0eecd3426381bd42a7ddbf0771bdde8bba5cb943f60031ae3567d6a3dcac738facda8  mercurial-2.2.2.tar.gz' > mercurial-2.2.2.tar.gz.sha512
+echo '5b1a0fb52dcb21ca5f0ab71c8a49550e1e8cf633552ec6598dc43f0b32c03422bf5af65b30118c163231ecdddfd40846909336f16da318959106076e80a3fad0  matplotlib-1.2.0.tar.gz' > matplotlib-1.2.0.tar.gz.sha512
+echo '52d1127de2208aaae693d16fef10ffc9b8663081bece83b7597d65706e9568af3b9e56bd211878774e1ebed92e21365ee9c49602a0ff5e48f89f12244d79c161  mercurial-2.4.tar.gz' > mercurial-2.4.tar.gz.sha512
 echo 'de3dd37f753614055dcfed910e9886e03688b8078492df3da94b1ec37be796030be93291cba09e8212fffd3e0a63b086902c3c25a996cf1439e15c5b16e014d9  numpy-1.6.1.tar.gz' > numpy-1.6.1.tar.gz.sha512
 echo '5ad681f99e75849a5ca6f439c7a19bb51abc73d121b50f4f8e4c0da42891950f30407f761a53f0fe51b370b1dbd4c4f5a480557cb2444c8c7c7d5412b328a474  sqlite-autoconf-3070500.tar.gz' > sqlite-autoconf-3070500.tar.gz.sha512
 echo 'edae735960279d92acf58e1f4095c6392a7c2059b8f1d2c46648fc608a0fb06b392db2d073f4973f5762c034ea66596e769b95b3d26ad963a086b9b2d09825f2  zlib-1.2.3.tar.bz2' > zlib-1.2.3.tar.bz2.sha512
-echo '42021737c93cea513116e6051cff9b803e3f25d6019c74370b42f4c91d1af73e94ac2b7ace85b7565fa3d45b79231079bd48a242531beeafa33c36d7139ce838  ipython-0.13.tar.gz' > ipython-0.13.tar.gz.sha512
+echo '05ac335727a2c3036f31a2506fdd2615aa436bfbe2f81799fe6c51bffe2591ad6a8427f3b25c34e7e709fb4e7607a0589dc7a22185c1f9b894e90de6711a88aa  ipython-0.13.1.tar.gz' > ipython-0.13.1.tar.gz.sha512
 echo 'fb3cf421b2dc48c31956b3e3ee4ab6ebc743deec3bf626c2238a1996c8c51be87260bd6aa662793a1f0c34dcda9b3146763777bb162dfad6fec4ca7acc403b2e  zeromq-2.2.0.tar.gz' > zeromq-2.2.0.tar.gz.sha512
 echo 'd761b492352841cdc125d9f0c99ee6d6c435812472ea234728b7f0fb4ad1048e1eec9b399df2081fbc926566f333f7780fedd0ce23255a6633fe5c60ed15a6af  pyzmq-2.1.11.tar.gz' > pyzmq-2.1.11.tar.gz.sha512
 echo '57fa5e57dfb98154a42d2d477f29401c2260ae7ad3a8128a4098b42ee3b35c54367b1a3254bc76b9b3b14b4aab7c3e1135858f68abc5636daedf2f01f9b8a3cf  tornado-2.2.tar.gz' > tornado-2.2.tar.gz.sha512
 echo '1332e3d5465ca249c357314cf15d2a4e5e83a941841021b8f6a17a107dce268a7a082838ade5e8db944ecde6bfb111211ab218aa414ee90aafbb81f1491b3b93  Forthon-0.8.10.tar.gz' > Forthon-0.8.10.tar.gz.sha512
+echo 'c13116c1f0547000cc565e15774687b9e884f8b74fb62a84e578408a868a84961704839065ae4f21b662e87f2aaedf6ea424ea58dfa9d3d73c06281f806d15dd  nose-1.2.1.tar.gz' > nose-1.2.1.tar.gz.sha512
+echo '73de2c99406a38f85273931597525cec4ebef55b93712adca3b0bfea8ca3fc99446e5d6495817e9ad55cf4d48feb7fb49734675c4cc8938db8d4a5225d30eca7  python-hglib-0.2.tar.gz' > python-hglib-0.2.tar.gz.sha512
+echo 'ffc602eb346717286b3d0a6770c60b03b578b3cf70ebd12f9e8b1c8c39cdb12ef219ddaa041d7929351a6b02dbb8caf1821b5452d95aae95034cbf4bc9904a7a  sympy-0.7.2.tar.gz' > sympy-0.7.2.tar.gz.sha512
 
 # Individual processes
-[ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.7.tar.gz
+[ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.9.tar.gz
 [ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.3.tar.bz2 
 [ $INST_BZLIB -eq 1 ] && get_ytproject bzip2-1.0.5.tar.gz
 [ $INST_PNG -eq 1 ] && get_ytproject libpng-1.5.12.tar.gz
@@ -444,14 +447,16 @@
 [ $INST_0MQ -eq 1 ] && get_ytproject tornado-2.2.tar.gz
 get_ytproject Python-2.7.3.tgz
 get_ytproject numpy-1.6.1.tar.gz
-get_ytproject matplotlib-1.1.0.tar.gz
-get_ytproject mercurial-2.2.2.tar.gz
-get_ytproject ipython-0.13.tar.gz
-get_ytproject h5py-2.0.1.tar.gz
-get_ytproject Cython-0.16.tar.gz
+get_ytproject matplotlib-1.2.0.tar.gz
+get_ytproject mercurial-2.4.tar.gz
+get_ytproject ipython-0.13.1.tar.gz
+get_ytproject h5py-2.1.0.tar.gz
+get_ytproject Cython-0.17.1.tar.gz
 get_ytproject reason-js-20120623.zip
 get_ytproject Forthon-0.8.10.tar.gz
-
+get_ytproject nose-1.2.1.tar.gz 
+get_ytproject python-hglib-0.2.tar.gz
+get_ytproject sympy-0.7.2.tar.gz
 if [ $INST_BZLIB -eq 1 ]
 then
     if [ ! -e bzip2-1.0.5/done ]
@@ -535,11 +540,11 @@
 
 if [ -z "$HDF5_DIR" ]
 then
-    if [ ! -e hdf5-1.8.7/done ]
+    if [ ! -e hdf5-1.8.9/done ]
     then
-        [ ! -e hdf5-1.8.7 ] && tar xfz hdf5-1.8.7.tar.gz
+        [ ! -e hdf5-1.8.9 ] && tar xfz hdf5-1.8.9.tar.gz
         echo "Installing HDF5"
-        cd hdf5-1.8.7
+        cd hdf5-1.8.9
         ( ./configure --prefix=${DEST_DIR}/ --enable-shared 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -587,7 +592,7 @@
 if [ $INST_HG -eq 1 ]
 then
     echo "Installing Mercurial."
-    do_setup_py mercurial-2.2.2
+    do_setup_py mercurial-2.4
     export HG_EXEC=${DEST_DIR}/bin/hg
 else
     # We assume that hg can be found in the path.
@@ -655,10 +660,10 @@
     echo "Setting CFLAGS ${CFLAGS}"
 fi
 # Now we set up the basedir for matplotlib:
-mkdir -p ${DEST_DIR}/src/matplotlib-1.1.0
-echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.1.0/setup.cfg
-echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.1.0/setup.cfg
-do_setup_py matplotlib-1.1.0
+mkdir -p ${DEST_DIR}/src/matplotlib-1.2.0
+echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.2.0/setup.cfg
+echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.2.0/setup.cfg
+do_setup_py matplotlib-1.2.0
 if [ -n "${OLD_LDFLAGS}" ]
 then
     export LDFLAG=${OLD_LDFLAGS}
@@ -685,10 +690,13 @@
     do_setup_py tornado-2.2
 fi
 
-do_setup_py ipython-0.13
-do_setup_py h5py-2.0.1
-do_setup_py Cython-0.16
+do_setup_py ipython-0.13.1
+do_setup_py h5py-2.1.0
+do_setup_py Cython-0.17.1
 do_setup_py Forthon-0.8.10
+do_setup_py nose-1.2.1
+do_setup_py python-hglib-0.2
+do_setup_py sympy-0.7.2
 [ $INST_PYX -eq 1 ] && do_setup_py PyX-0.11.1
 
 echo "Doing yt update, wiping local changes and updating to branch ${BRANCH}"


diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -84,6 +84,7 @@
     ValidateSpatial, \
     ValidateGridType, \
     add_field, \
+    add_grad, \
     derived_field
 
 from particle_trajectories import \


diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -709,7 +709,7 @@
     _type_name = "streamline"
     _con_args = ('positions')
     sort_by = 't'
-    def __init__(self, positions, fields=None, pf=None, **kwargs):
+    def __init__(self, positions, length = 1.0, fields=None, pf=None, **kwargs):
         """
         This is a streamline, which is a set of points defined as
         being parallel to some vector field.
@@ -725,6 +725,8 @@
         ----------
         positions : array-like
             List of streamline positions
+        length : float
+            The magnitude of the distance; dts will be divided by this
         fields : list of strings, optional
             If you want the object to pre-retrieve a set of fields, supply them
             here.  This is not necessary.
@@ -749,7 +751,9 @@
         self.dts = np.empty_like(positions[:,0])
         self.dts[:-1] = np.sqrt(np.sum((self.positions[1:]-
                                         self.positions[:-1])**2,axis=1))
-        self.dts[-1] = self.dts[-1]
+        self.dts[-1] = self.dts[-2]
+        self.length = length
+        self.dts /= length
         self.ts = np.add.accumulate(self.dts)
         self._set_center(self.positions[0])
         self.set_field_parameter('center', self.positions[0])
@@ -768,31 +772,30 @@
 
     @restore_grid_state
     def _get_data_from_grid(self, grid, field):
-        mask = np.logical_and(self._get_cut_mask(grid),
-                              grid.child_mask)
-        if field == 'dts': return self._dts[grid.id][mask]
-        if field == 't': return self._ts[grid.id][mask]
-        return grid[field][mask]
+        # No child masking here; it happens inside the mask cut
+        mask = self._get_cut_mask(grid) 
+        if field == 'dts': return self._dts[grid.id]
+        if field == 't': return self._ts[grid.id]
+        return grid[field].flat[mask]
         
     @cache_mask
     def _get_cut_mask(self, grid):
-        mask = np.zeros(grid.ActiveDimensions, dtype='int')
-        dts = np.zeros(grid.ActiveDimensions, dtype='float64')
-        ts = np.zeros(grid.ActiveDimensions, dtype='float64')
         #pdb.set_trace()
         points_in_grid = np.all(self.positions > grid.LeftEdge, axis=1) & \
                          np.all(self.positions <= grid.RightEdge, axis=1) 
         pids = np.where(points_in_grid)[0]
-        for i, pos in zip(pids, self.positions[points_in_grid]):
+        mask = np.zeros(points_in_grid.sum(), dtype='int')
+        dts = np.zeros(points_in_grid.sum(), dtype='float64')
+        ts = np.zeros(points_in_grid.sum(), dtype='float64')
+        for mi, (i, pos) in enumerate(zip(pids, self.positions[points_in_grid])):
             if not points_in_grid[i]: continue
             ci = ((pos - grid.LeftEdge)/grid.dds).astype('int')
+            if grid.child_mask[ci[0], ci[1], ci[2]] == 0: continue
             for j in range(3):
                 ci[j] = min(ci[j], grid.ActiveDimensions[j]-1)
-            if mask[ci[0], ci[1], ci[2]]:
-                continue
-            mask[ci[0], ci[1], ci[2]] = 1
-            dts[ci[0], ci[1], ci[2]] = self.dts[i]
-            ts[ci[0], ci[1], ci[2]] = self.ts[i]
+            mask[mi] = np.ravel_multi_index(ci, grid.ActiveDimensions)
+            dts[mi] = self.dts[i]
+            ts[mi] = self.ts[i]
         self._dts[grid.id] = dts
         self._ts[grid.id] = ts
         return mask
@@ -3884,10 +3887,21 @@
         self._get_list_of_grids()
         # We don't generate coordinates here.
         if field == None:
-            fields_to_get = self.fields[:]
+            fields = self.fields[:]
         else:
-            fields_to_get = ensure_list(field)
-        fields_to_get = [f for f in fields_to_get if f not in self.field_data]
+            fields = ensure_list(field)
+        fields_to_get = []
+        for field in fields:
+            if self.field_data.has_key(field): continue
+            if field not in self.hierarchy.field_list:
+                try:
+                    #print "Generating", field
+                    self._generate_field(field)
+                    continue
+                except NeedsOriginalGrid as ngt_exception:
+                    pass
+            fields_to_get.append(field)
+        if len(fields_to_get) == 0: return
         # Note that, thanks to some trickery, we have different dimensions
         # on the field than one might think from looking at the dx and the
         # L/R edges.


diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -58,6 +58,66 @@
                 return function
             return create_function
         self[name] = DerivedField(name, function, **kwargs)
+        
+    def add_grad(self, field, **kwargs):
+        """
+        Creates the partial derivative of a given field. This function will
+        autogenerate the names of the gradient fields.
+
+        """
+        sl = slice(2,None,None)
+        sr = slice(None,-2,None)
+        
+        def _gradx(f, data):
+            grad = data[field][sl,1:-1,1:-1] - data[field][sr,1:-1,1:-1]
+            grad /= 2.0*data["dx"].flat[0]
+            g = np.zeros(data[field].shape, dtype='float64')
+            g[1:-1,1:-1,1:-1] = grad
+            return g
+            
+        def _grady(f, data):
+            grad = data[field][1:-1,sl,1:-1] - data[field][1:-1,sr,1:-1]
+            grad /= 2.0*data["dy"].flat[0]
+            g = np.zeros(data[field].shape, dtype='float64')
+            g[1:-1,1:-1,1:-1] = grad
+            return g
+            
+        def _gradz(f, data):
+            grad = data[field][1:-1,1:-1,sl] - data[field][1:-1,1:-1,sr]
+            grad /= 2.0*data["dz"].flat[0]
+            g = np.zeros(data[field].shape, dtype='float64')
+            g[1:-1,1:-1,1:-1] = grad
+            return g
+        
+        d_kwargs = kwargs.copy()
+        if "display_name" in kwargs: del d_kwargs["display_name"]
+        
+        for ax in "xyz":
+            if "display_name" in kwargs:
+                disp_name = r"%s\_%s" % (kwargs["display_name"], ax)
+            else:
+                disp_name = r"\partial %s/\partial %s" % (field, ax)
+            name = "Grad_%s_%s" % (field, ax)
+            self[name] = DerivedField(name, function=eval('_grad%s' % ax),
+                         take_log=False, validators=[ValidateSpatial(1,[field])],
+                         display_name = disp_name, **d_kwargs)
+        
+        def _grad(f, data) :
+            a = np.power(data["Grad_%s_x" % field],2)
+            b = np.power(data["Grad_%s_y" % field],2)
+            c = np.power(data["Grad_%s_z" % field],2)
+            norm = np.sqrt(a+b+c)
+            return norm
+
+        if "display_name" in kwargs:
+            disp_name = kwargs["display_name"]
+        else:
+            disp_name = r"\Vert\nabla %s\Vert" % (field)   
+        name = "Grad_%s" % field           
+        self[name] = DerivedField(name, function=_grad, take_log=False,
+                                  display_name = disp_name, **d_kwargs)
+        mylog.info("Added new fields: Grad_%s_x, Grad_%s_y, Grad_%s_z, Grad_%s" \
+                   % (field, field, field, field))
 
     def has_key(self, key):
         # This gets used a lot
@@ -96,6 +156,7 @@
 
 FieldInfo = FieldInfoContainer()
 add_field = FieldInfo.add_field
+add_grad = FieldInfo.add_grad
 
 def derived_field(**kwargs):
     def inner_decorator(function):

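A minimal sketch of the new add_grad entry point, assuming it is exported
alongside add_field (as the api.py hunk above suggests) and that the dataset
path is loadable (the path here is illustrative):

    from yt.mods import *

    add_grad("Density")    # registers Grad_Density_x/_y/_z and Grad_Density
    pf = load("DD0010/moving7_0010")
    dd = pf.h.all_data()
    print dd["Grad_Density"].max()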

diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/data_objects/hierarchy.py
--- a/yt/data_objects/hierarchy.py
+++ b/yt/data_objects/hierarchy.py
@@ -382,17 +382,19 @@
         """
         Prints out (stdout) relevant information about the simulation
         """
-        header = "%3s\t%6s\t%14s" % ("level","# grids", "# cells")
+        header = "%3s\t%6s\t%14s\t%14s" % ("level","# grids", "# cells",
+                                           "# cells^3")
         print header
         print "%s" % (len(header.expandtabs())*"-")
         for level in xrange(MAXLEVEL):
             if (self.level_stats['numgrids'][level]) == 0:
                 break
-            print "% 3i\t% 6i\t% 14i" % \
+            print "% 3i\t% 6i\t% 14i\t% 14i" % \
                   (level, self.level_stats['numgrids'][level],
-                   self.level_stats['numcells'][level])
+                   self.level_stats['numcells'][level],
+                   self.level_stats['numcells'][level]**(1./3))
             dx = self.select_grids(level)[0].dds[0]
-        print "-" * 28
+        print "-" * 46
         print "   \t% 6i\t% 14i" % (self.level_stats['numgrids'].sum(), self.level_stats['numcells'].sum())
         print "\n"
         try:

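Note that the new fourth column is the cube root of the cell count, i.e. the side length of the equivalent uniform grid at that level, despite the "# cells^3" label:

    >>> int(round(2097152 ** (1./3)))   # a level holding 128^3 cells
    128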

diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/data_objects/tests/test_covering_grid.py
--- a/yt/data_objects/tests/test_covering_grid.py
+++ b/yt/data_objects/tests/test_covering_grid.py
@@ -25,3 +25,24 @@
                                       dn*di[1]+i:dn*(di[1]+dd[1])+i:dn,
                                       dn*di[2]+i:dn*(di[2]+dd[2])+i:dn]
                     yield assert_equal, f, g["Density"]
+
+def test_smoothed_covering_grid():
+    # We decompose in different ways
+    for level in [0, 1, 2]:
+        for nprocs in [1, 2, 4, 8]:
+            pf = fake_random_pf(16, nprocs = nprocs)
+            dn = pf.refine_by**level 
+            cg = pf.h.smoothed_covering_grid(level, [0.0, 0.0, 0.0],
+                    dn * pf.domain_dimensions)
+            assert_equal( cg["Ones"].max(), 1.0)
+            assert_equal( cg["Ones"].min(), 1.0)
+            assert_equal( cg["CellVolume"].sum(), pf.domain_width.prod())
+            for g in pf.h.grids:
+                if level != g.Level: continue
+                di = g.get_global_startindex()
+                dd = g.ActiveDimensions
+                for i in range(dn):
+                    f = cg["Density"][dn*di[0]+i:dn*(di[0]+dd[0])+i:dn,
+                                      dn*di[1]+i:dn*(di[1]+dd[1])+i:dn,
+                                      dn*di[2]+i:dn*(di[2]+dd[2])+i:dn]
+                    yield assert_equal, f, g["Density"]


diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/data_objects/tests/test_projection.py
--- a/yt/data_objects/tests/test_projection.py
+++ b/yt/data_objects/tests/test_projection.py
@@ -1,6 +1,4 @@
 from yt.testing import *
-from yt.data_objects.profiles import \
-    BinnedProfile1D, BinnedProfile2D, BinnedProfile3D
 
 def setup():
     from yt.config import ytcfg
@@ -32,8 +30,30 @@
                 yield assert_equal, np.unique(proj["py"]), uc[yax]
                 yield assert_equal, np.unique(proj["pdx"]), 1.0/(dims[xax]*2.0)
                 yield assert_equal, np.unique(proj["pdy"]), 1.0/(dims[yax]*2.0)
+                frb = proj.to_frb((1.0,'unitary'), 64)
+                for proj_field in ['Ones', 'Density']:
+                    yield assert_equal, frb[proj_field].info['data_source'], \
+                            proj.__str__()
+                    yield assert_equal, frb[proj_field].info['axis'], \
+                            ax
+                    yield assert_equal, frb[proj_field].info['field'], \
+                            proj_field
+                    yield assert_equal, frb[proj_field].info['units'], \
+                            pf.field_info[proj_field].get_units()
+                    yield assert_equal, frb[proj_field].info['xlim'], \
+                            frb.bounds[:2]
+                    yield assert_equal, frb[proj_field].info['ylim'], \
+                            frb.bounds[2:]
+                    yield assert_equal, frb[proj_field].info['length_to_cm'], \
+                            pf['cm']
+                    yield assert_equal, frb[proj_field].info['center'], \
+                            proj.center
+                    yield assert_equal, frb[proj_field].info['weight_field'], \
+                            wf
             # wf == None
             yield assert_equal, wf, None
             v1 = proj["Density"].sum()
             v2 = (dd["Density"] * dd["d%s" % an]).sum()
             yield assert_rel_equal, v1, v2, 10
+
+


diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/data_objects/tests/test_slice.py
--- /dev/null
+++ b/yt/data_objects/tests/test_slice.py
@@ -0,0 +1,55 @@
+from yt.testing import *
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_slice():
+    for nprocs in [8, 1]:
+        # We want to test both 1 proc and 8 procs, to make sure that
+        # parallelism isn't broken
+        pf = fake_random_pf(64, nprocs = nprocs)
+        dims = pf.domain_dimensions
+        xn, yn, zn = pf.domain_dimensions
+        xi, yi, zi = pf.domain_left_edge + 1.0/(pf.domain_dimensions * 2)
+        xf, yf, zf = pf.domain_right_edge - 1.0/(pf.domain_dimensions * 2)
+        coords = np.mgrid[xi:xf:xn*1j, yi:yf:yn*1j, zi:zf:zn*1j]
+        uc = [np.unique(c) for c in coords]
+        slc_pos = 0.5
+        # Some simple slice tests with single grids
+        for ax, an in enumerate("xyz"):
+            xax = x_dict[ax]
+            yax = y_dict[ax]
+            for wf in ["Density", None]:
+                slc = pf.h.slice(ax, slc_pos, ["Ones", "Density"])
+                yield assert_equal, slc["Ones"].sum(), slc["Ones"].size
+                yield assert_equal, slc["Ones"].min(), 1.0
+                yield assert_equal, slc["Ones"].max(), 1.0
+                yield assert_equal, np.unique(slc["px"]), uc[xax]
+                yield assert_equal, np.unique(slc["py"]), uc[yax]
+                yield assert_equal, np.unique(slc["pdx"]), 1.0/(dims[xax]*2.0)
+                yield assert_equal, np.unique(slc["pdy"]), 1.0/(dims[yax]*2.0)
+                frb = slc.to_frb((1.0,'unitary'), 64)
+                for slc_field in ['Ones', 'Density']:
+                    yield assert_equal, frb[slc_field].info['data_source'], \
+                            slc.__str__()
+                    yield assert_equal, frb[slc_field].info['axis'], \
+                            ax
+                    yield assert_equal, frb[slc_field].info['field'], \
+                            slc_field
+                    yield assert_equal, frb[slc_field].info['units'], \
+                            pf.field_info[slc_field].get_units()
+                    yield assert_equal, frb[slc_field].info['xlim'], \
+                            frb.bounds[:2]
+                    yield assert_equal, frb[slc_field].info['ylim'], \
+                            frb.bounds[2:]
+                    yield assert_equal, frb[slc_field].info['length_to_cm'], \
+                            pf['cm']
+                    yield assert_equal, frb[slc_field].info['center'], \
+                            slc.center
+                    yield assert_equal, frb[slc_field].info['coord'], \
+                            slc_pos
+            # wf == None
+            yield assert_equal, wf, None
+
+


diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/data_objects/tests/test_streamlines.py
--- /dev/null
+++ b/yt/data_objects/tests/test_streamlines.py
@@ -0,0 +1,22 @@
+from yt.testing import *
+from yt.visualization.api import Streamlines
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+_fields = ("Density", "x-velocity", "y-velocity", "z-velocity")
+
+def test_streamlines():
+    # We decompose in different ways
+    cs = np.mgrid[0.47:0.53:2j,0.47:0.53:2j,0.47:0.53:2j]
+    cs = np.array([a.ravel() for a in cs]).T
+    length = (1.0/128) * 16 # 16 half-widths of a cell
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs = nprocs, fields = _fields)
+        streams = Streamlines(pf, cs, length=length)
+        streams.integrate_through_volume()
+        for path in (streams.path(i) for i in range(8)):
+            yield assert_rel_equal, path['dts'].sum(), 1.0, 14
+            yield assert_equal, np.all(path['t'] <= (1.0 + 1e-10)), True
+            path["Density"]

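The test exercises the same interface a user would; a short sketch of inspecting one integrated path outside the test harness, assuming matplotlib is available:

    import matplotlib.pyplot as plt

    stream = streams.path(0)   # streams as constructed in the test above
    plt.plot(stream['t'], stream['Density'])   # field sampled along the path
    plt.savefig("streamline_density.png")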

diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/frontends/enzo/answer_testing_support.py
--- /dev/null
+++ b/yt/frontends/enzo/answer_testing_support.py
@@ -0,0 +1,113 @@
+"""
+Answer Testing support for Enzo.
+
+Author: Britton Smith <brittonsmith at gmail.com>
+Affiliation: Michigan State University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Britton Smith.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.testing import *
+from yt.config import ytcfg
+from yt.mods import *
+
+from yt.utilities.answer_testing.framework import \
+     AnswerTestingTest, \
+     can_run_pf, \
+     FieldValuesTest, \
+     GridHierarchyTest, \
+     GridValuesTest, \
+     ProjectionValuesTest, \
+     ParentageRelationshipsTest, \
+     temp_cwd
+
+def requires_outputlog(path = ".", prefix = ""):
+    def ffalse(func):
+        return lambda: None
+    def ftrue(func):
+        @wraps(func)
+        def fyielder(*args, **kwargs):
+            with temp_cwd(path):
+                for t in func(*args, **kwargs):
+                    if isinstance(t, AnswerTestingTest):
+                        t.prefix = prefix
+                    yield t
+        return fyielder
+    if os.path.exists("OutputLog"):
+        return ftrue
+    with temp_cwd(path):
+        if os.path.exists("OutputLog"):
+            return ftrue
+    return ffalse
+     
+def standard_small_simulation(pf_fn, fields):
+    if not can_run_pf(pf_fn): return
+    dso = [None]
+    yield GridHierarchyTest(pf_fn)
+    yield ParentageRelationshipsTest(pf_fn)
+    for field in fields:
+        yield GridValuesTest(pf_fn, field)
+        if 'particle' in field: continue
+        for axis in [0, 1, 2]:
+            for ds in dso:
+                for weight_field in [None, "Density"]:
+                    yield ProjectionValuesTest(
+                        pf_fn, axis, field, weight_field,
+                        ds)
+                yield FieldValuesTest(
+                        pf_fn, field, ds)
+                    
+class ShockTubeTest(object):
+    def __init__(self, data_file, solution_file, fields, 
+                 left_edges, right_edges, rtol, atol):
+        self.solution_file = solution_file
+        self.data_file = data_file
+        self.fields = fields
+        self.left_edges = left_edges
+        self.right_edges = right_edges
+        self.rtol = rtol
+        self.atol = atol
+
+    def __call__(self):
+        # Read in the pf
+        pf = load(self.data_file)  
+        exact = self.get_analytical_solution() 
+
+        ad = pf.h.all_data()
+        position = ad['x']
+        for k in self.fields:
+            field = ad[k]
+            for xmin, xmax in zip(self.left_edges, self.right_edges):
+                mask = (position >= xmin)*(position <= xmax)
+                exact_field = np.interp(position[mask], exact['pos'], exact[k]) 
+                # yield test vs analytical solution 
+                yield assert_allclose, field[mask], exact_field, \
+                    self.rtol, self.atol
+
+    def get_analytical_solution(self):
+        # Reads in from file 
+        pos, dens, vel, pres, inte = \
+                np.loadtxt(self.solution_file, unpack=True)
+        exact = {}
+        exact['pos'] = pos
+        exact['Density'] = dens
+        exact['x-velocity'] = vel
+        exact['Pressure'] = pres
+        exact['ThermalEnergy'] = inte
+        return exact

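A sketch of how these pieces compose in an Enzo answer test; the directory, output names, fields and tolerances below are illustrative, not part of this changeset:

    @requires_outputlog("path/to/ShockTube")   # no-op if Enzo never wrote OutputLog
    def test_sod_shock_tube():
        test = ShockTubeTest("DD0001/data0001", "analytical.txt",
                             ["Density", "x-velocity", "Pressure"],
                             left_edges=[0.25], right_edges=[0.75],
                             rtol=1.0e-2, atol=1.0e-7)
        for t in test():
            yield t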

diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -241,9 +241,13 @@
             if my_initial_time == my_times[my_indices[0] - 1]: my_indices[0] -= 1
             my_outputs = my_all_outputs[my_indices[0]:my_indices[1]]
 
-        TimeSeriesData.__init__(self, outputs=[output['filename'] for output in my_outputs],
-                                parallel=parallel)
-        mylog.info("%d outputs loaded into time series." % len(my_outputs))
+        init_outputs = []
+        for output in my_outputs:
+            if os.path.exists(output['filename']):
+                init_outputs.append(output['filename'])
+            
+        TimeSeriesData.__init__(self, outputs=init_outputs, parallel=parallel)
+        mylog.info("%d outputs loaded into time series." % len(init_outputs))
 
     def _parse_parameter_file(self):
         """
@@ -478,7 +482,7 @@
         self.parameters['TopGridRank'] = 3
         self.parameters['DomainLeftEdge'] = np.zeros(self.parameters['TopGridRank'])
         self.parameters['DomainRightEdge'] = np.ones(self.parameters['TopGridRank'])
-        self.parameters['Refineby'] = 2 # technically not the enzo default
+        self.parameters['RefineBy'] = 2 # technically not the enzo default
         self.parameters['StopCycle'] = 100000
         self.parameters['dtDataDump'] = 0.
         self.parameters['CycleSkipDataDump'] = 0.
@@ -585,6 +589,8 @@
         if outputs is None:
             outputs = self.all_outputs
         my_outputs = []
+        if not outputs:
+            return my_outputs
         for value in values:
             outputs.sort(key=lambda obj:np.fabs(value - obj[key]))
             if (tolerance is None or np.abs(value - outputs[0][key]) <= tolerance) \

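With the change above, outputs listed in the parameter file but missing on disk are skipped instead of breaking the load; roughly, assuming the standard simulation() entry point:

    es = simulation("MySimulation.par", "Enzo")   # hypothetical parameter file
    es.get_time_series()   # only outputs whose files exist are loaded
    for pf in es:
        print pf.current_time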

diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -274,6 +274,7 @@
             self.conversion_factors["Time"] = 1.0
         for unit in mpc_conversion.keys():
             self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
+            self.units[unit+"cm"] = self.units[unit]
             self.units[unit] /= (1.0+self.current_redshift)
             
     def _setup_cgs_units(self):


diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -99,8 +99,8 @@
         add_field(fn1.split("_")[0] + "_Density",
                   function=_get_density(fn1), take_log=True,
                   display_name="%s\/Density" % fn1.split("_")[0],
-                  units = r"\rm{g}/\rm{cm}^3",
-                  projected_units = r"\rm{g}/\rm{cm}^2",
+                  units = r"\rm{g}/\rm{cm}^{3}",
+                  projected_units = r"\rm{g}/\rm{cm}^{2}",
                   )
 
 def _get_convert(fname):
@@ -110,8 +110,8 @@
 
 add_flash_field("dens", function=NullFunc, take_log=True,
                 convert_function=_get_convert("dens"),
-                units=r"\rm{g}/\rm{cm}^3",
-                projected_units = r"\rm{g}/\rm{cm}^2"),
+                units=r"\rm{g}/\rm{cm}^{3}",
+                projected_units = r"\rm{g}/\rm{cm}^{2}"),
 add_flash_field("velx", function=NullFunc, take_log=False,
                 convert_function=_get_convert("velx"),
                 units=r"\rm{cm}/\rm{s}")
@@ -159,10 +159,10 @@
                 units = r"\rm{K}")
 add_flash_field("pres", function=NullFunc, take_log=True,
                 convert_function=_get_convert("pres"),
-                units=r"\rm{erg}\//\/\rm{cm}^{3}")
+                units=r"\rm{erg}/\rm{cm}^{3}")
 add_flash_field("pden", function=NullFunc, take_log=True,
                 convert_function=_get_convert("pden"),
-                units=r"\rm{g}/\rm{cm}^3")
+                units=r"\rm{g}/\rm{cm}^{3}")
 add_flash_field("magx", function=NullFunc, take_log=False,
                 convert_function=_get_convert("magx"),
                 units = r"\mathrm{Gau\ss}")
@@ -174,7 +174,7 @@
                 units = r"\mathrm{Gau\ss}")
 add_flash_field("magp", function=NullFunc, take_log=True,
                 convert_function=_get_convert("magp"),
-                units = r"\rm{erg}\//\/\rm{cm}^{3}")
+                units = r"\rm{erg}/\rm{cm}^{3}")
 add_flash_field("divb", function=NullFunc, take_log=False,
                 convert_function=_get_convert("divb"),
                 units = r"\mathrm{Gau\ss}\/\rm{cm}")
@@ -186,10 +186,10 @@
                 units=r"\rm{ratio\/of\/specific\/heats}")
 add_flash_field("gpot", function=NullFunc, take_log=False,
                 convert_function=_get_convert("gpot"),
-                units=r"\rm{ergs\//\/g}")
+                units=r"\rm{ergs}/\rm{g}")
 add_flash_field("gpol", function=NullFunc, take_log=False,
                 convert_function=_get_convert("gpol"),
-                units = r"\rm{ergs\//\/g}")
+                units = r"\rm{ergs}/\rm{g}")
 add_flash_field("flam", function=NullFunc, take_log=False,
                 convert_function=_get_convert("flam"))
 


diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/frontends/stream/api.py
--- a/yt/frontends/stream/api.py
+++ b/yt/frontends/stream/api.py
@@ -29,7 +29,9 @@
       StreamHierarchy, \
       StreamStaticOutput, \
       StreamHandler, \
-      load_uniform_grid
+      load_uniform_grid, \
+      load_amr_grids, \
+      refine_amr
 
 from .fields import \
       KnownStreamFields, \


diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -44,6 +44,8 @@
     decompose_array, get_psize
 from yt.utilities.definitions import \
     mpc_conversion, sec_conversion
+from yt.utilities.flagging_methods import \
+    FlaggingGrid
 
 from .fields import \
     StreamFieldInfo, \
@@ -395,3 +397,168 @@
     for unit in mpc_conversion.keys():
         spf.units[unit] = mpc_conversion[unit] * box_in_mpc
     return spf
+
+def load_amr_grids(grid_data, domain_dimensions, sim_unit_to_cm, bbox=None,
+                   sim_time=0.0, number_of_particles=0):
+    r"""Load a set of grids of data into yt as a
+    :class:`~yt.frontends.stream.data_structures.StreamHandler`.
+
+    This should allow a sequence of grids of varying resolution of data to be
+    loaded directly into yt and analyzed as would any others.  This comes with
+    several caveats:
+        * Units will be incorrect unless the data has already been converted to
+          cgs.
+        * Some functions may behave oddly, and parallelism will be
+          disappointing or non-existent in most cases.
+        * Particles may be difficult to integrate.
+        * No consistency checks are performed on the hierarchy.
+
+    Parameters
+    ----------
+    grid_data : list of dicts
+        This is a list of dicts.  Each dict must have entries "left_edge",
+        "right_edge", "dimensions", "level", and then any remaining entries are
+        assumed to be fields.  This will be modified in place and can't be
+        assumed to be static.
+    domain_dimensions : array_like
+        This is the domain dimensions of the grid
+    sim_unit_to_cm : float
+        Conversion factor from simulation units to centimeters
+    bbox : array_like (xdim:zdim, LE:RE), optional
+        Size of computational domain in units of sim_unit_to_cm
+    sim_time : float, optional
+        The simulation time in seconds
+    number_of_particles : int, optional
+        If particle fields are included, set this to the number of particles
+
+    Examples
+    --------
+
+    >>> grid_data = [
+    ...     dict(left_edge = [0.0, 0.0, 0.0],
+    ...          right_edge = [1.0, 1.0, 1.],
+    ...          level = 0,
+    ...          dimensions = [32, 32, 32]),
+    ...     dict(left_edge = [0.25, 0.25, 0.25],
+    ...          right_edge = [0.75, 0.75, 0.75],
+    ...          level = 1,
+    ...          dimensions = [32, 32, 32])
+    ... ]
+    ... 
+    >>> for g in grid_data:
+    ...     g["Density"] = np.random.random(g["dimensions"]) * 2**g["level"]
+    ...
+    >>> pf = load_amr_grids(grid_data, [32, 32, 32], 1.0)
+    """
+
+    domain_dimensions = np.array(domain_dimensions)
+    ngrids = len(grid_data)
+    if bbox is None:
+        bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], 'float64')
+    domain_left_edge = np.array(bbox[:, 0], 'float64')
+    domain_right_edge = np.array(bbox[:, 1], 'float64')
+    grid_levels = np.zeros((ngrids, 1), dtype='int32')
+    grid_left_edges = np.zeros((ngrids, 3), dtype="float32")
+    grid_right_edges = np.zeros((ngrids, 3), dtype="float32")
+    grid_dimensions = np.zeros((ngrids, 3), dtype="int32")
+    sfh = StreamDictFieldHandler()
+    for i, g in enumerate(grid_data):
+        grid_left_edges[i,:] = g.pop("left_edge")
+        grid_right_edges[i,:] = g.pop("right_edge")
+        grid_dimensions[i,:] = g.pop("dimensions")
+        grid_levels[i,:] = g.pop("level")
+        sfh[i] = g
+
+    handler = StreamHandler(
+        grid_left_edges,
+        grid_right_edges,
+        grid_dimensions,
+        grid_levels,
+        None, # parent_ids is none
+        number_of_particles*np.ones(ngrids, dtype='int64').reshape(ngrids,1),
+        np.zeros(ngrids).reshape((ngrids,1)),
+        sfh,
+    )
+
+    handler.name = "AMRGridData"
+    handler.domain_left_edge = domain_left_edge
+    handler.domain_right_edge = domain_right_edge
+    handler.refine_by = 2
+    handler.dimensionality = 3
+    handler.domain_dimensions = domain_dimensions
+    handler.simulation_time = sim_time
+    handler.cosmology_simulation = 0
+
+    spf = StreamStaticOutput(handler)
+    spf.units["cm"] = sim_unit_to_cm
+    spf.units['1'] = 1.0
+    spf.units["unitary"] = 1.0
+    box_in_mpc = sim_unit_to_cm / mpc_conversion['cm']
+    for unit in mpc_conversion.keys():
+        spf.units[unit] = mpc_conversion[unit] * box_in_mpc
+    return spf
+
+def refine_amr(base_pf, refinement_criteria, fluid_operators, max_level,
+               callback = None):
+    r"""Given a base parameter file, repeatedly apply refinement criteria and
+    fluid operators until a maximum level is reached.
+
+    Parameters
+    ----------
+    base_pf : StaticOutput
+        This is any static output.  It can also be a stream static output, for
+        instance as returned by load_uniform_grid.
+    refinement_criteria : list of :class:`~yt.utilities.flagging_methods.FlaggingMethod`
+        These criteria will be applied in sequence to identify cells that need
+        to be refined.
+    fluid_operators : list of :class:`~yt.utilities.initial_conditions.FluidOperator`
+        These fluid operators will be applied in sequence to all resulting
+        grids.
+    max_level : int
+        The maximum level to which the data will be refined
+    callback : function, optional
+        A function that will be called at the beginning of each refinement
+        cycle, with the current parameter file.
+
+    Examples
+    --------
+    >>> domain_dims = (32, 32, 32)
+    >>> data = np.zeros(domain_dims) + 0.25
+    >>> fo = [ic.CoredSphere(0.05, 0.3, [0.7,0.4,0.75], {"Density": (0.25, 100.0)})]
+    >>> rc = [fm.flagging_method_registry["overdensity"](8.0)]
+    >>> ug = load_uniform_grid({'Density': data}, domain_dims, 1.0)
+    >>> pf = refine_amr(ug, rc, fo, 5)
+    """
+    last_gc = base_pf.h.num_grids
+    cur_gc = -1
+    pf = base_pf    
+    while pf.h.max_level < max_level and last_gc != cur_gc:
+        mylog.info("Refining another level.  Current max level: %s",
+                  pf.h.max_level)
+        last_gc = pf.h.grids.size
+        for m in fluid_operators: m.apply(pf)
+        if callback is not None: callback(pf)
+        grid_data = []
+        for g in pf.h.grids:
+            gd = dict( left_edge = g.LeftEdge,
+                       right_edge = g.RightEdge,
+                       level = g.Level,
+                       dimensions = g.ActiveDimensions )
+            for field in pf.h.field_list:
+                gd[field] = g[field]
+            grid_data.append(gd)
+            if g.Level < pf.h.max_level: continue
+            fg = FlaggingGrid(g, refinement_criteria)
+            nsg = fg.find_subgrids()
+            for sg in nsg:
+                LE = sg.left_index * g.dds
+                dims = sg.dimensions * pf.refine_by
+                grid = pf.h.smoothed_covering_grid(g.Level + 1, LE, dims)
+                gd = dict(left_edge = LE, right_edge = grid.right_edge,
+                          level = g.Level + 1, dimensions = dims)
+                for field in pf.h.field_list:
+                    gd[field] = grid[field]
+                grid_data.append(gd)
+        pf = load_amr_grids(grid_data, pf.domain_dimensions, 1.0)
+        cur_gc = pf.h.num_grids
+    return pf

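The docstring example elides its imports; a self-contained version, with module paths taken from elsewhere in this changeset:

    import numpy as np
    import yt.utilities.initial_conditions as ic
    import yt.utilities.flagging_methods as fm
    from yt.frontends.stream.api import load_uniform_grid, refine_amr

    domain_dims = (32, 32, 32)
    data = np.zeros(domain_dims) + 0.25
    fo = [ic.CoredSphere(0.05, 0.3, [0.7, 0.4, 0.75],
                         {"Density": (0.25, 100.0)})]
    rc = [fm.flagging_method_registry["overdensity"](8.0)]
    ug = load_uniform_grid({"Density": data}, domain_dims, 1.0)
    pf = refine_amr(ug, rc, fo, 5)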

diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -23,6 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+import __builtin__
 import time, types, signal, inspect, traceback, sys, pdb, os
 import contextlib
 import warnings, struct, subprocess
@@ -310,7 +311,7 @@
     maxval = max(maxval, 1)
     from yt.config import ytcfg
     if ytcfg.getboolean("yt", "suppressStreamLogging") or \
-       ytcfg.getboolean("yt", "ipython_notebook") or \
+       "__IPYTHON__" in dir(__builtin__) or \
        ytcfg.getboolean("yt", "__withintesting"):
         return DummyProgressBar()
     elif ytcfg.getboolean("yt", "__withinreason"):


diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -58,7 +58,7 @@
 from yt.data_objects.api import \
     BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \
     data_object_registry, \
-    derived_field, add_field, FieldInfo, \
+    derived_field, add_field, add_grad, FieldInfo, \
     ValidateParameter, ValidateDataField, ValidateProperty, \
     ValidateSpatial, ValidateGridType, \
     TimeSeriesData, AnalysisTask, analysis_task, \


diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -26,9 +26,10 @@
 from yt.funcs import *
 from numpy.testing import assert_array_equal, assert_almost_equal, \
     assert_approx_equal, assert_array_almost_equal, assert_equal, \
-    assert_array_less, assert_string_equal, assert_array_almost_equal_nulp
+    assert_array_less, assert_string_equal, assert_array_almost_equal_nulp,\
+    assert_allclose
 
-def assert_rel_equal(a1, a2, decimals):
+def assert_rel_equal(a1, a2, decimals, err_msg=''):
     # We have nan checks in here because occasionally we have fields that get
     # weighted without non-zero weights.  I'm looking at you, particle fields!
     if isinstance(a1, np.ndarray):
@@ -38,7 +39,7 @@
         a2[np.isnan(a2)] = 1.0
     elif np.isnan(a1) and np.isnan(a2):
         return True
-    return assert_almost_equal(a1/a2, 1.0, decimals)
+    return assert_almost_equal(a1/a2, 1.0, decimals, err_msg=err_msg)
 
 def amrspace(extent, levels=7, cells=8):
     """Creates two numpy arrays representing the left and right bounds of 


diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -34,7 +34,9 @@
 from yt.testing import *
 from yt.config import ytcfg
 from yt.mods import *
+from yt.data_objects.static_output import StaticOutput
 import cPickle
+import shelve
 
 from yt.utilities.logger import disable_stream_logging
 from yt.utilities.command_line import get_yt_version
@@ -60,6 +62,8 @@
             help="The name we'll call this set of tests")
         parser.add_option("--answer-store", dest="store_results",
             default=False, action="store_true")
+        parser.add_option("--local-store", dest="store_local_results",
+            default=False, action="store_true", help="Store/Load local results?")
 
     def configure(self, options, conf):
         super(AnswerTesting, self).configure(options, conf)
@@ -75,40 +79,47 @@
         ytcfg["yt","__withintesting"] = "True"
         AnswerTestingTest.result_storage = \
             self.result_storage = defaultdict(dict)
-        if options.compare_name is not None:
-            # Now we grab from our S3 store
-            if options.compare_name == "latest":
-                options.compare_name = _latest
+        if options.compare_name == "SKIP":
+            options.compare_name = None
+        elif options.compare_name == "latest":
+            options.compare_name = _latest
+
+        # We only either store or test.
+        if options.store_local_results:
+            if options.compare_name is not None:
+                options.compare_name = "%s/%s" % \
+                        (os.path.realpath(options.output_dir), 
+                         options.compare_name)
             AnswerTestingTest.reference_storage = \
-                AnswerTestOpener(options.compare_name)
-        self.answer_name = options.this_name
+                self.storage = \
+                    AnswerTestLocalStorage(options.compare_name, 
+                                           not options.store_results)
+        else:
+            AnswerTestingTest.reference_storage = \
+                self.storage = AnswerTestCloudStorage(options.compare_name, not options.store_results)
+
         self.store_results = options.store_results
+        self.store_local_results = options.store_local_results
         global run_big_data
         run_big_data = options.big_data
 
-    def finalize(self, result):
-        # This is where we dump our result storage up to Amazon, if we are able
-        # to.
+    def finalize(self, result=None):
         if self.store_results is False: return
-        import boto
-        from boto.s3.key import Key
-        c = boto.connect_s3()
-        bucket = c.get_bucket("yt-answer-tests")
-        for pf_name in self.result_storage:
-            rs = cPickle.dumps(self.result_storage[pf_name])
-            tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name)) 
-            if tk is not None: tk.delete()
-            k = Key(bucket)
-            k.key = "%s_%s" % (self.answer_name, pf_name)
-            k.set_contents_from_string(rs)
-            k.set_acl("public-read")
+        self.storage.dump(self.result_storage)        
 
-class AnswerTestOpener(object):
-    def __init__(self, reference_name):
+class AnswerTestStorage(object):
+    def __init__(self, reference_name, read=True):
         self.reference_name = reference_name
         self.cache = {}
+        self.read = read
+    def dump(self, result_storage):
+        raise NotImplementedError 
+    def get(self, pf_name, default=None):
+        raise NotImplementedError 
 
+class AnswerTestCloudStorage(AnswerTestStorage):
     def get(self, pf_name, default = None):
+        if not self.read: return default
         if pf_name in self.cache: return self.cache[pf_name]
         url = _url_path % (self.reference_name, pf_name)
         try:
@@ -123,6 +134,47 @@
         self.cache[pf_name] = rv
         return rv
 
+    def dump(self, result_storage):
+        if self.read: return
+        # This is where we dump our result storage up to Amazon, if we are able
+        # to.
+        import boto
+        from boto.s3.key import Key
+        c = boto.connect_s3()
+        bucket = c.get_bucket("yt-answer-tests")
+        for pf_name in result_storage:
+            rs = cPickle.dumps(result_storage[pf_name])
+            tk = bucket.get_key("%s_%s" % (self.reference_name, pf_name)) 
+            if tk is not None: tk.delete()
+            k = Key(bucket)
+            k.key = "%s_%s" % (self.reference_name, pf_name)
+            k.set_contents_from_string(rs)
+            k.set_acl("public-read")
+
+class AnswerTestLocalStorage(AnswerTestStorage):
+    def dump(self, result_storage):
+        if self.read: return 
+        # Store data using shelve
+        ds = shelve.open(self.reference_name, protocol=-1)
+        for pf_name in result_storage:
+            answer_name = "%s" % pf_name
+            if answer_name in ds:
+                mylog.info("Overwriting %s", answer_name)
+            ds[answer_name] = result_storage[pf_name]
+        ds.close()
+
+    def get(self, pf_name, default=None):
+        if not self.read: return default
+        # Read data using shelve
+        answer_name = "%s" % pf_name
+        ds = shelve.open(self.reference_name, protocol=-1)
+        try:
+            result = ds[answer_name]
+        except KeyError:
+            result = default
+        ds.close()
+        return result
+
 @contextlib.contextmanager
 def temp_cwd(cwd):
     oldcwd = os.getcwd()
@@ -132,6 +184,10 @@
 
 def can_run_pf(pf_fn):
     path = ytcfg.get("yt", "test_data_dir")
+    if not os.path.isdir(path):
+        return False
+    if isinstance(pf_fn, StaticOutput):
+        return AnswerTestingTest.result_storage is not None
     with temp_cwd(path):
         try:
             load(pf_fn)
@@ -141,26 +197,48 @@
 
 def data_dir_load(pf_fn):
     path = ytcfg.get("yt", "test_data_dir")
+    if not os.path.isdir(path):
+        return False
+    if isinstance(pf_fn, StaticOutput): return pf_fn
     with temp_cwd(path):
         pf = load(pf_fn)
         pf.h
         return pf
 
+def sim_dir_load(sim_fn, path = None, sim_type = "Enzo",
+                 find_outputs=False):
+    if path is None and not os.path.exists(sim_fn):
+        raise IOError
+    if os.path.exists(sim_fn) or not path:
+        path = "."
+    with temp_cwd(path):
+        return simulation(sim_fn, sim_type,
+                          find_outputs=find_outputs)
+
 class AnswerTestingTest(object):
     reference_storage = None
+    result_storage = None
+    prefix = ""
     def __init__(self, pf_fn):
         self.pf = data_dir_load(pf_fn)
 
     def __call__(self):
         nv = self.run()
-        if self.reference_storage is not None:
-            dd = self.reference_storage.get(str(self.pf))
-            if dd is None: raise YTNoOldAnswer()
+        if self.reference_storage.read and \
+           self.reference_storage.reference_name is not None:
+            dd = self.reference_storage.get(self.storage_name)
+            if dd is None: raise YTNoOldAnswer(self.storage_name)
             ov = dd[self.description]
             self.compare(nv, ov)
         else:
             ov = None
-        self.result_storage[str(self.pf)][self.description] = nv
+        self.result_storage[self.storage_name][self.description] = nv
+
+    @property
+    def storage_name(self):
+        if self.prefix != "":
+            return "%s_%s" % (self.prefix, self.pf)
+        return str(self.pf)
 
     def compare(self, new_result, old_result):
         raise RuntimeError
@@ -211,10 +289,12 @@
     _type_name = "FieldValues"
     _attrs = ("field", )
 
-    def __init__(self, pf_fn, field, obj_type = None):
+    def __init__(self, pf_fn, field, obj_type = None,
+                 decimals = None):
         super(FieldValuesTest, self).__init__(pf_fn)
         self.obj_type = obj_type
         self.field = field
+        self.decimals = decimals
 
     def run(self):
         obj = self.create_obj(self.pf, self.obj_type)
@@ -224,19 +304,50 @@
         return np.array([avg, mi, ma])
 
     def compare(self, new_result, old_result):
-        assert_equal(new_result, old_result)
+        err_msg = "Field values for %s not equal." % self.field
+        if self.decimals is None:
+            assert_equal(new_result, old_result, 
+                         err_msg=err_msg, verbose=True)
+        else:
+            assert_rel_equal(new_result, old_result, self.decimals,
+                             err_msg=err_msg)
 
+class AllFieldValuesTest(AnswerTestingTest):
+    _type_name = "AllFieldValues"
+    _attrs = ("field", )
+
+    def __init__(self, pf_fn, field, obj_type = None,
+                 decimals = None):
+        super(AllFieldValuesTest, self).__init__(pf_fn)
+        self.obj_type = obj_type
+        self.field = field
+        self.decimals = decimals
+
+    def run(self):
+        obj = self.create_obj(self.pf, self.obj_type)
+        return obj[self.field]
+
+    def compare(self, new_result, old_result):
+        err_msg = "All field values for %s not equal." % self.field
+        if self.decimals is None:
+            assert_equal(new_result, old_result, 
+                         err_msg=err_msg, verbose=True)
+        else:
+            assert_rel_equal(new_result, old_result, self.decimals,
+                             err_msg=err_msg)
+            
 class ProjectionValuesTest(AnswerTestingTest):
     _type_name = "ProjectionValues"
     _attrs = ("field", "axis", "weight_field")
 
     def __init__(self, pf_fn, axis, field, weight_field = None,
-                 obj_type = None):
+                 obj_type = None, decimals = None):
         super(ProjectionValuesTest, self).__init__(pf_fn)
         self.axis = axis
         self.field = field
+        self.weight_field = weight_field
         self.obj_type = obj_type
+        self.decimals = decimals
 
     def run(self):
         if self.obj_type is not None:
@@ -253,7 +364,14 @@
         for k in new_result:
             assert (k in old_result)
         for k in new_result:
-            assert_equal(new_result[k], old_result[k])
+            err_msg = "%s values of %s (%s weighted) projection (axis %s) not equal." % \
+              (k, self.field, self.weight_field, self.axis)
+            if self.decimals is None:
+                assert_equal(new_result[k], old_result[k],
+                             err_msg=err_msg)
+            else:
+                assert_rel_equal(new_result[k], old_result[k], 
+                                 self.decimals, err_msg=err_msg)
 
 class PixelizedProjectionValuesTest(AnswerTestingTest):
     _type_name = "PixelizedProjectionValues"
@@ -312,6 +430,26 @@
         for k in new_result:
             assert_equal(new_result[k], old_result[k])
 
+class VerifySimulationSameTest(AnswerTestingTest):
+    _type_name = "VerifySimulationSame"
+    _attrs = ()
+
+    def __init__(self, simulation_obj):
+        self.pf = simulation_obj
+
+    def run(self):
+        result = [ds.current_time for ds in self.pf]
+        return result
+
+    def compare(self, new_result, old_result):
+        assert_equal(len(new_result), len(old_result),
+                     err_msg="Number of outputs not equal.",
+                     verbose=True)
+        for i in range(len(new_result)):
+            assert_equal(new_result[i], old_result[i],
+                         err_msg="Output times not equal.",
+                         verbose=True)
+        
 class GridHierarchyTest(AnswerTestingTest):
     _type_name = "GridHierarchy"
     _attrs = ()

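A minimal sketch of the new shelve-backed local store in isolation; the store name and result payload are illustrative:

    # read=False marks this instance as a writer.
    writer = AnswerTestLocalStorage("my-answers", read=False)
    writer.dump({"fake_pf": {"FieldValues_Density": [1.5, 0.0, 3.0]}})

    # read=True (the default) marks it as a reader.
    reader = AnswerTestLocalStorage("my-answers")
    print reader.get("fake_pf")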

diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -25,6 +25,7 @@
 
 # We don't need to import 'exceptions'
 #import exceptions
+import os.path
 
 class YTException(Exception):
     def __init__(self, pf = None):
@@ -172,3 +173,12 @@
 
     def __str__(self):
         return "Must have A>=B>=C"
+
+class EnzoTestOutputFileNonExistent(YTException):
+    def __init__(self, filename):
+        self.filename = filename
+        self.testname = os.path.basename(os.path.dirname(filename))
+
+    def __str__(self):
+        return "Enzo test output file (OutputLog) not generated for: " + \
+            "'%s'" % (self.testname) + ".\nTest did not complete."


diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/utilities/flagging_methods.py
--- a/yt/utilities/flagging_methods.py
+++ b/yt/utilities/flagging_methods.py
@@ -24,15 +24,10 @@
 """
 
 import numpy as np # For modern purposes
+from yt.utilities.lib import grow_flagging_field
 
 flagging_method_registry = {}
 
-def flag_cells(grid, methods):
-    flagged = np.zeros(grid.ActiveDimensions, dtype="bool")
-    for method in methods:
-        flagged |= method(grid)
-    return flagged
-
 class FlaggingMethod(object):
     _skip_add = False
     class __metaclass__(type):
@@ -46,6 +41,144 @@
     def __init__(self, over_density):
         self.over_density = over_density
 
-    def __call__(self, pf, grid):
-        rho = grid["Density"] / (pf.refine_by**grid.Level)
+    def __call__(self, grid):
+        rho = grid["Density"] / (grid.pf.refine_by**grid.Level)
         return (rho > self.over_density)
+
+class FlaggingGrid(object):
+    def __init__(self, grid, methods):
+        self.grid = grid
+        flagged = np.zeros(grid.ActiveDimensions, dtype="bool")
+        for method in methods:
+            flagged |= method(self.grid)
+        self.flagged = grow_flagging_field(flagged)
+        self.subgrids = []
+        self.left_index = grid.get_global_startindex()
+        self.dimensions = grid.ActiveDimensions.copy()
+
+    def find_subgrids(self):
+        if not np.any(self.flagged): return []
+        psg = ProtoSubgrid(self.flagged, self.left_index, self.dimensions)
+        sgl = [psg]
+        index = 0
+        while index < len(sgl):
+            psg = sgl[index]
+            psg.shrink()
+            if psg.dimensions.prod() == 0:
+                sgl[index] = None
+                index += 1
+                continue
+            while not psg.acceptable:
+                new_psgs = []
+                for i, dim in enumerate(np.argsort(psg.dimensions)[::-1]):
+                    new_psgs = psg.find_by_zero_signature(dim)
+                    if len(new_psgs) > 1:
+                        break
+                if len(new_psgs) <= 1:
+                    new_psgs = psg.find_by_second_derivative()
+                psg = new_psgs[0]
+                sgl[index] = psg 
+                sgl.extend(new_psgs[1:])
+                psg.shrink()
+            index += 1
+        return [sg for sg in sgl if sg is not None]
+
+
+# Much or most of this is directly translated from Enzo
+class ProtoSubgrid(object):
+
+    def __init__(self, flagged_base, left_index, dimensions, offset = (0,0,0)):
+        self.left_index = left_index.copy()
+        self.dimensions = dimensions.copy()
+        self.flagged = flagged_base[offset[0]:offset[0]+dimensions[0],
+                                    offset[1]:offset[1]+dimensions[1],
+                                    offset[2]:offset[2]+dimensions[2]]
+        self.compute_signatures()
+
+    def compute_signatures(self):
+        self.sigs = []
+        for dim in range(3):
+            d1 = (dim + 1) % 3
+            d2 = (dim == 0)
+            self.sigs.append(self.flagged.sum(axis=d1).sum(axis=d2))
+
+    @property
+    def acceptable(self):
+        return float(self.flagged.sum()) / self.flagged.size > 0.2
+
+    def shrink(self):
+        new_ind = []
+        for dim in range(3):
+            sig = self.sigs[dim]
+            new_start = 0
+            while sig[new_start] == 0:
+                new_start += 1
+            new_end = sig.size 
+            while sig[new_end - 1] == 0:
+                new_end -= 1
+            self.dimensions[dim] = new_end - new_start
+            self.left_index[dim] += new_start
+            new_ind.append((new_start, new_end))
+        self.flagged = self.flagged[new_ind[0][0]:new_ind[0][1],
+                                    new_ind[1][0]:new_ind[1][1],
+                                    new_ind[2][0]:new_ind[2][1]]
+        self.compute_signatures()
+
+    def find_by_zero_signature(self, dim):
+        sig = self.sigs[dim]
+        grid_ends = np.zeros((sig.size, 2))
+        ng = 0
+        i = 0
+        while i < sig.size:
+            if sig[i] != 0:
+                grid_ends[ng, 0] = i
+                while i < sig.size and sig[i] != 0:
+                    i += 1
+                grid_ends[ng, 1] = i - 1
+                ng += 1
+            i += 1
+        new_grids = []
+        for si, ei in grid_ends[:ng,:]:
+            li = self.left_index.copy()
+            dims = self.dimensions.copy()
+            li[dim] += si
+            dims[dim] = ei - si
+            offset = [0,0,0]
+            offset[dim] = si
+            new_grids.append(ProtoSubgrid(self.flagged, li, dims, offset))
+        return new_grids
+
+    def find_by_second_derivative(self):
+        max_strength = 0
+        max_axis = -1
+        max_ind = -1
+        for dim in range(3):
+            sig = self.sigs[dim]
+            sd = sig[:-2] - 2.0*sig[1:-1] + sig[2:]
+            grid_ends = np.zeros((sig.size, 2))
+            ng = 0
+            center = int((self.flagged.shape[dim] - 1) / 2)
+            strength = zero_strength = 0
+            for i in range(1, sig.size-2):
+                # Note that sd is offset by one
+                if sd[i-1] * sd[i] < 0:
+                    strength = np.abs(sd[i-1] - sd[i])
+                    if strength > zero_strength or \
+                       (strength == zero_strength and np.abs(center - i) < np.abs(zero_cross - i)):
+                        zero_strength = strength
+                        zero_cross = i
+            if zero_strength > max_strength:
+                max_strength = zero_strength
+                max_axis = dim
+                max_ind = zero_cross
+        dims = self.dimensions.copy()
+        li = self.left_index.copy()
+        dims[max_axis] = max_ind
+        psg1 = ProtoSubgrid(self.flagged, li, dims)
+        li[max_axis] += max_ind
+        dims[max_axis] = self.dimensions[max_axis] - max_ind
+        offset = np.zeros(3)
+        offset[max_axis] = max_ind
+        psg2 = ProtoSubgrid(self.flagged, li, dims, offset)
+        return [psg1, psg2]
+
+    def __str__(self):
+        return "LI: (%s) DIMS: (%s)" % (self.left_index, self.dimensions)

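A short sketch tying the new pieces together on a toy dataset, using fake_random_pf from yt.testing:

    from yt.testing import fake_random_pf
    from yt.utilities.flagging_methods import \
        flagging_method_registry, FlaggingGrid

    pf = fake_random_pf(16)
    od = flagging_method_registry["overdensity"](0.75)
    fg = FlaggingGrid(pf.h.grids[0], [od])
    for sg in fg.find_subgrids():
        print sg   # "LI: (...) DIMS: (...)" via ProtoSubgrid.__str__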

diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/utilities/initial_conditions.py
--- /dev/null
+++ b/yt/utilities/initial_conditions.py
@@ -0,0 +1,80 @@
+"""
+Painting zones in a grid
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+
+class FluidOperator(object):
+    def apply(self, pf):
+        for g in pf.h.grids: self(g)
+
+class TopHatSphere(FluidOperator):
+    def __init__(self, radius, center, fields):
+        self.radius = radius
+        self.center = center
+        self.fields = fields
+        
+    def __call__(self, grid, sub_select = None):
+        r = np.zeros(grid.ActiveDimensions, dtype="float64")
+        for i, ax in enumerate("xyz"):
+            np.add(r, (grid[ax] - self.center[i])**2.0, r)
+        np.sqrt(r, r)
+        ind = (r <= self.radius)
+        if sub_select is not None:
+            ind &= sub_select
+        for field, val in self.fields.iteritems():
+            grid[field][ind] = val
+
+class CoredSphere(FluidOperator):
+    def __init__(self, core_radius, radius, center, fields):
+        self.radius = radius
+        self.center = center
+        self.fields = fields
+        self.core_radius = core_radius
+
+    def __call__(self, grid, sub_select = None):
+        r = np.zeros(grid.ActiveDimensions, dtype="float64")
+        r2 = self.radius**2
+        cr2 = self.core_radius**2
+        for i, ax in enumerate("xyz"):
+            np.add(r, (grid[ax] - self.center[i])**2.0, r)
+        np.maximum(r, cr2, r)
+        ind = (r <= r2)
+        if sub_select is not None:
+            ind &= sub_select
+        for field, (outer_val, inner_val) in self.fields.iteritems():
+            val = ((r[ind] - cr2) / (r2 - cr2))**0.5 * (outer_val - inner_val)
+            grid[field][ind] = val + inner_val
+
+class RandomFluctuation(FluidOperator):
+    def __init__(self, fields):
+        self.fields = fields
+
+    def __call__(self, grid, sub_select = None):
+        if sub_select is None:
+            sub_select = Ellipsis
+        for field, mag in self.fields.iteritems():
+            vals = grid[field][sub_select]
+            rc = 1.0 + (np.random.random(vals.shape) - 0.5) * mag
+            grid[field][sub_select] *= rc

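A sketch of applying these operators to a uniform grid, using load_uniform_grid from the stream frontend; the radii and values are illustrative:

    import numpy as np
    import yt.utilities.initial_conditions as ic
    from yt.frontends.stream.api import load_uniform_grid

    data = np.zeros((32, 32, 32)) + 0.25
    ug = load_uniform_grid({"Density": data}, [32, 32, 32], 1.0)
    # Paint a Density = 10 sphere at the domain center, then perturb the
    # whole field by up to +/- 5%.
    ic.TopHatSphere(0.2, [0.5, 0.5, 0.5], {"Density": 10.0}).apply(ug)
    ic.RandomFluctuation({"Density": 0.05}).apply(ug)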

diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -291,3 +291,25 @@
     # Return out unique values
     return best_dim, split, less_ids.view("bool"), greater_ids.view("bool")
 
+
+def grow_flagging_field(oofield):
+    cdef np.ndarray[np.uint8_t, ndim=3] ofield = oofield.astype("uint8")
+    cdef np.ndarray[np.uint8_t, ndim=3] nfield
+    nfield = np.zeros_like(ofield)
+    cdef int i, j, k, ni, nj, nk
+    cdef int oi, oj, ok
+    for ni in range(ofield.shape[0]):
+        for nj in range(ofield.shape[1]):
+            for nk in range(ofield.shape[2]):
+                for oi in range(3):
+                    i = ni + (oi - 1)
+                    if i < 0 or i >= ofield.shape[0]: continue
+                    for oj in range(3):
+                        j = nj + (oj - 1)
+                        if j < 0 or j >= ofield.shape[1]: continue
+                        for ok in range(3):
+                            k = nk + (ok - 1)
+                            if k < 0 or k >= ofield.shape[2]: continue
+                            if ofield[i, j, k] == 1:
+                                nfield[ni, nj, nk] = 1
+    return nfield.astype("bool")

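For reference, a pure-NumPy equivalent of what the Cython loop computes, a one-cell dilation in which each flagged zone also flags its 26 neighbors; assumes a NumPy recent enough to have np.pad:

    import numpy as np

    def grow_flagging_field_py(ofield):
        o = np.pad(ofield.astype("bool"), 1, mode="constant")
        n = np.zeros(ofield.shape, dtype="bool")
        for oi in range(3):
            for oj in range(3):
                for ok in range(3):
                    # shifted view of the padded array (offsets -1..1)
                    n |= o[oi:oi + n.shape[0],
                           oj:oj + n.shape[1],
                           ok:ok + n.shape[2]]
        return n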

diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -279,12 +279,12 @@
     ranks = None
     available_ranks = None
     tasks = None
-    workgroups = []
     def __init__(self):
         self.comm = communication_system.communicators[-1]
         self.size = self.comm.size
         self.ranks = range(self.size)
         self.available_ranks = range(self.size)
+        self.workgroups = []
     
     def add_workgroup(self, size=None, ranks=None, name=None):
         if size is None:
@@ -296,7 +296,7 @@
             ranks = [self.available_ranks.pop(0) for i in range(size)]
         # Default name to the workgroup number.
         if name is None: 
-            name = string(len(workgroups))
+            name = str(len(self.workgroups))
         group = self.comm.comm.Get_group().Incl(ranks)
         new_comm = self.comm.comm.Create(group)
         if self.comm.rank in ranks:
@@ -304,16 +304,20 @@
         self.workgroups.append(Workgroup(len(ranks), ranks, new_comm, name))
     
     def free_workgroup(self, workgroup):
+        # If you want to actually delete the workgroup you will need to
+        # pop it out of the self.workgroups list so you don't have references
+        # that are left dangling, e.g. see free_all() below.
         for i in workgroup.ranks:
             if self.comm.rank == i:
                 communication_system.communicators.pop()
             self.available_ranks.append(i) 
-        del workgroup
         self.available_ranks.sort()
 
     def free_all(self):
         for wg in self.workgroups:
             self.free_workgroup(wg)
+        for i in range(len(self.workgroups)):
+            self.workgroups.pop(0)
 
     @classmethod
     def from_sizes(cls, sizes):
@@ -581,7 +585,9 @@
                     ncols, size = data.shape
             ncols = self.comm.allreduce(ncols, op=MPI.MAX)
             if ncols == 0:
-                    data = np.zeros(0, dtype=dtype) # This only works for
+                data = np.zeros(0, dtype=dtype) # This only works for
+            elif data is None:
+                data = np.zeros((ncols, 0), dtype=dtype)
             size = data.shape[-1]
             sizes = np.zeros(self.comm.size, dtype='int64')
             outsize = np.array(size, dtype='int64')
@@ -1058,3 +1064,49 @@
                 nextdim = (nextdim + 1) % 3
         return cuts
     
+class GroupOwnership(ParallelAnalysisInterface):
+    def __init__(self, items):
+        ParallelAnalysisInterface.__init__(self)
+        self.num_items = len(items)
+        self.items = items
+        assert(self.num_items >= self.comm.size)
+        self.owned = range(self.comm.size)
+        self.pointer = 0
+        if parallel_capable:
+            communication_system.push_with_ids([self.comm.rank])
+
+    def __del__(self):
+        if parallel_capable:
+            communication_system.pop()
+
+    def inc(self, n = -1):
+        old_item = self.item
+        if n == -1: n = self.comm.size
+        for i in range(n):
+            if self.pointer >= self.num_items - self.comm.size: break
+            self.owned[self.pointer % self.comm.size] += self.comm.size
+            self.pointer += 1
+        if self.item is not old_item:
+            self.switch()
+            
+    def dec(self, n = -1):
+        old_item = self.item
+        if n == -1: n = self.comm.size
+        for i in range(n):
+            if self.pointer == 0: break
+            self.owned[(self.pointer - 1) % self.comm.size] -= self.comm.size
+            self.pointer -= 1
+        if self.item is not old_item:
+            self.switch()
+
+    _last = None
+    @property
+    def item(self):
+        own = self.owned[self.comm.rank]
+        if self._last != own:
+            self._item = self.items[own]
+            self._last = own
+        return self._item
+
+    def switch(self):
+        pass

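The first hunk fixes a classic mutable-class-attribute bug: every pool instance previously shared a single workgroups list. A sketch of the intended lifecycle, assuming this is the ProcessorPool class and an MPI launch (e.g. mpirun -np 4):

    from yt.utilities.parallel_tools.parallel_analysis_interface import \
        ProcessorPool

    pool = ProcessorPool()
    pool.add_workgroup(size=pool.size // 2, name="analysis")
    pool.add_workgroup(name="io")   # remaining ranks, auto-named "1"
    for wg in pool.workgroups:
        print wg.name, wg.ranks
    pool.free_all()   # now actually empties pool.workgroups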

diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/utilities/tests/test_flagging_methods.py
--- a/yt/utilities/tests/test_flagging_methods.py
+++ b/yt/utilities/tests/test_flagging_methods.py
@@ -9,4 +9,4 @@
 def test_over_density():
     od_flag = flagging_method_registry["overdensity"](0.75) 
     criterion = (pf.h.grids[0]["Density"] > 0.75)
-    assert( np.all( od_flag(pf, pf.h.grids[0]) == criterion) )
+    assert( np.all( od_flag(pf.h.grids[0]) == criterion) )


diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -29,6 +29,7 @@
     y_dict, \
     axis_names
 from .volume_rendering.api import off_axis_projection
+from yt.data_objects.image_array import ImageArray
 import _MPL
 import numpy as np
 import weakref
@@ -133,8 +134,9 @@
                              self.bounds, int(self.antialias),
                              self._period, int(self.periodic),
                              ).transpose()
-        self[item] = buff
-        return buff
+        ia = ImageArray(buff, info=self._get_info(item))
+        self[item] = ia
+        return ia 
 
     def __setitem__(self, item, val):
         self.data[item] = val
@@ -145,6 +147,28 @@
             if f not in exclude:
                 self[f]
 
+    def _get_info(self, item):
+        info = {}
+        info['data_source'] = self.data_source.__str__()  
+        info['axis'] = self.data_source.axis
+        info['field'] = str(item)
+        info['units'] = self.data_source.pf.field_info[item].get_units()
+        info['xlim'] = self.bounds[:2]
+        info['ylim'] = self.bounds[2:]
+        info['length_to_cm'] = self.data_source.pf['cm']
+        info['projected_units'] = \
+                self.data_source.pf.field_info[item].get_projected_units()
+        info['center'] = self.data_source.center
+        try:
+            info['coord'] = self.data_source.coord
+        except AttributeError:
+            pass
+        try:
+            info['weight_field'] = self.data_source.weight_field
+        except AttributeError:
+            pass
+        return info
+
     def convert_to_pixel(self, coords):
         r"""This function converts coordinates in code-space to pixel-space.
 
@@ -383,8 +407,9 @@
                                self.data_source[item],
                                self.buff_size[0], self.buff_size[1],
                                self.bounds).transpose()
-        self[item] = buff
-        return buff
+        ia = ImageArray(buff, info=self._get_info(item))
+        self[item] = ia
+        return ia 
 
 
 class OffAxisProjectionFixedResolutionBuffer(FixedResolutionBuffer):
@@ -406,7 +431,8 @@
                                    weight=ds.weight_field, volume=ds.volume,
                                    no_ghost=ds.no_ghost, interpolated=ds.interpolated,
                                    north_vector=ds.north_vector)
-        self[item] = buff.swapaxes(0,1)
-        return buff
+        ia = ImageArray(buff.swapaxes(0,1), info=self._get_info(item))
+        self[item] = ia
+        return ia 
 
 


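With the buffers above wrapped as ImageArray, the metadata assembled in _get_info travels with the pixelized image. A hedged sketch of what that enables (the parameter file, bounds, and field are placeholder values, and pf is assumed to be already loaded):

    from yt.visualization.fixed_resolution import FixedResolutionBuffer

    slc = pf.h.slice(0, 0.5, fields=["Density"])
    frb = FixedResolutionBuffer(slc, (0.0, 1.0, 0.0, 1.0), (800, 800))
    img = frb["Density"]           # now an ImageArray, not a bare ndarray
    print(img.info["axis"], img.info["units"], img.info["xlim"])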
diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -375,12 +375,10 @@
 class StreamlineCallback(PlotCallback):
     _type_name = "streamlines"
     def __init__(self, field_x, field_y, factor = 16,
-                 density = 1, arrowsize = 1, arrowstyle = None,
-                 color = None, normalize = False):
+                 density = 1, plot_args=None):
         """
-        annotate_streamlines(field_x, field_y, factor = 16, density = 1,
-                             arrowsize = 1, arrowstyle = None,
-                             color = None, normalize = False):
+        annotate_streamlines(field_x, field_y, factor = 16,
+                             density = 1, plot_args=None):
 
         Add streamlines to any plot, using the *field_x* and *field_y*
         from the associated data, skipping every *factor* datapoints like
@@ -392,12 +390,8 @@
         self.bv_x = self.bv_y = 0
         self.factor = factor
         self.dens = density
-        self.arrowsize = arrowsize
-        if arrowstyle is None : arrowstyle='-|>'
-        self.arrowstyle = arrowstyle
-        if color is None : color = "#000000"
-        self.color = color
-        self.normalize = normalize
+        if plot_args is None: plot_args = {}
+        self.plot_args = plot_args
         
     def __call__(self, plot):
         x0, x1 = plot.xlim
@@ -421,15 +415,10 @@
                              plot.data[self.field_y] - self.bv_y,
                              int(nx), int(ny),
                            (x0, x1, y0, y1),).transpose()
-        X,Y = (na.linspace(xx0,xx1,nx,endpoint=True),
-                          na.linspace(yy0,yy1,ny,endpoint=True))
-        if self.normalize:
-            nn = na.sqrt(pixX**2 + pixY**2)
-            pixX /= nn
-            pixY /= nn
-        plot._axes.streamplot(X,Y, pixX, pixY, density=self.dens,
-                              arrowsize=self.arrowsize, arrowstyle=self.arrowstyle,
-                              color=self.color, norm=self.normalize)
+        X,Y = (np.linspace(xx0,xx1,nx,endpoint=True),
+                          np.linspace(yy0,yy1,ny,endpoint=True))
+        plot._axes.streamplot(X,Y, pixX, pixY, density = self.dens,
+                              **self.plot_args)
         plot._axes.set_xlim(xx0,xx1)
         plot._axes.set_ylim(yy0,yy1)
         plot._axes.hold(False)
@@ -644,6 +633,9 @@
         y0, y1 = plot.ylim
         xx0, xx1 = plot._axes.get_xlim()
         yy0, yy1 = plot._axes.get_ylim()
+
+        extent = [xx0,xx1,yy0,yy1]
+
         plot._axes.hold(True)
 
         px_index = x_dict[plot.data.axis]
@@ -673,7 +665,7 @@
                              (x0, x1, y0, y1), 0).transpose()
             buff = np.maximum(temp, buff)
         self.rv = plot._axes.contour(buff, len(self.clumps)+1,
-                                     **self.plot_args)
+                                     extent=extent,**self.plot_args)
         plot._axes.hold(False)
 
 class ArrowCallback(PlotCallback):
@@ -743,13 +735,18 @@
         self.plot_args = plot_args
 
     def __call__(self, plot):
-        if len(self.pos) == 3:
+        xx0, xx1 = plot._axes.get_xlim()
+        yy0, yy1 = plot._axes.get_ylim()
+        if np.array(self.pos).shape == (3,):
             pos = (self.pos[x_dict[plot.data.axis]],
                    self.pos[y_dict[plot.data.axis]])
-        else: pos = self.pos
+        elif np.array(self.pos).shape == (2,):
+            pos = self.pos
         x,y = self.convert_to_plot(plot, pos)
         plot._axes.hold(True)
-        plot._axes.plot((x,),(y,),self.marker, **self.plot_args)
+        plot._axes.scatter(x,y, marker = self.marker, **self.plot_args)
+        plot._axes.set_xlim(xx0,xx1)
+        plot._axes.set_ylim(yy0,yy1)
         plot._axes.hold(False)
 
 class SphereCallback(PlotCallback):
@@ -815,6 +812,7 @@
 
     def __call__(self, plot):
         from matplotlib.patches import Circle
+        num = len(self.hop_output[:self.max_number])
         for halo in self.hop_output[:self.max_number]:
             size = halo.get_size()
             if size < self.min_size or size > self.max_size: continue
@@ -831,18 +829,19 @@
             (xi, yi) = (x_dict[plot.data.axis], y_dict[plot.data.axis])
 
             (center_x,center_y) = self.convert_to_plot(plot,(center[xi], center[yi]))
-            cir = Circle((center_x, center_y), radius, fill=False)
+            color = np.ones(3) * (0.4 * (num - halo.id)/ num) + 0.6
+            cir = Circle((center_x, center_y), radius, fill=False, color=color)
             plot._axes.add_patch(cir)
             if self.annotate:
                 if self.print_halo_size:
-                    plot._axes.text(center_x, center_y, "%s" % size,
-                    fontsize=self.font_size)
+                    plot._axes.text(center_x+radius, center_y+radius, "%s" % size,
+                    fontsize=self.font_size, color=color)
                 elif self.print_halo_mass:
-                    plot._axes.text(center_x, center_y, "%s" % halo.total_mass(),
-                    fontsize=self.font_size)
+                    plot._axes.text(center_x+radius, center_y+radius, "%s" % halo.total_mass(),
+                    fontsize=self.font_size, color=color)
                 else:
-                    plot._axes.text(center_x, center_y, "%s" % halo.id,
-                    fontsize=self.font_size)
+                    plot._axes.text(center_x+radius, center_y+radius, "%s" % halo.id,
+                    fontsize=self.font_size, color=color)
 
 class HopParticleCallback(PlotCallback):
     _type_name = "hop_particles"
@@ -1174,41 +1173,59 @@
           'kev': 1e-12 * 7.6e-8 / 6.03,
           'mev': 1e-15 * 7.6e-8 / 6.03,
           }
+    _bbox_dict = {'boxstyle': 'square,pad=0.6', 'fc': 'white', 'ec': 'black', 'alpha': 1.0}
 
-    def __init__(self, x, y, units=None, format="{time:.3G} {units}", **kwargs):
+    def __init__(self, x, y, units=None, format="{time:.3G} {units}", normalized=False, 
+                 bbox_dict=None, **kwargs):
         """ 
-        annotate_timestamp(x, y, units=None, format="{time:.3G} {units}", **kwargs)
+        annotate_timestamp(x, y, units=None, format="{time:.3G} {units}",
+                           normalized=False, bbox_dict=None, **kwargs)
 
         Adds the current time to the plot at point given by *x* and *y*.  If *units* 
         is given ('s', 'ms', 'ns', etc), it will convert the time to this basis.  If 
         *units* is None, it will attempt to figure out the correct value by which to 
         scale.  The *format* keyword is a template string that will be evaluated and 
-        displayed on the plot.  All other *kwargs* will be passed to the text() 
-        method on the plot axes.  See matplotlib's text() functions for more 
-        information.
+        displayed on the plot.  If *normalized* is true, *x* and *y* are interpreted 
+        as normalized plot coordinates (0,0 is lower-left and 1,1 is upper-right); 
+        otherwise *x* and *y* are assumed to be in plot coordinates. The *bbox_dict* 
+        is an optional dict of arguments for the bbox that frames the timestamp; see 
+        matplotlib's text annotation guide for more details. All other *kwargs* will 
+        be passed to the text() method on the plot axes.  See matplotlib's text() 
+        functions for more information.
         """
         self.x = x
         self.y = y
         self.format = format
         self.units = units
+        self.normalized = normalized
+        if bbox_dict is not None:
+            self.bbox_dict = bbox_dict
+        else:
+            self.bbox_dict = self._bbox_dict
         self.kwargs = {'color': 'w'}
         self.kwargs.update(kwargs)
 
     def __call__(self, plot):
         if self.units is None:
-            t = plot.data.pf.current_time
+            t = plot.data.pf.current_time * plot.data.pf['Time']
             scale_keys = ['as', 'fs', 'ps', 'ns', 'us', 'ms', 's']
             self.units = 's'
             for k in scale_keys:
                 if t < self._time_conv[k]:
                     break
                 self.units = k
-        t = plot.data.pf.current_time / self._time_conv[self.units.lower()]
+        t = plot.data.pf.current_time * plot.data.pf['Time'] 
+        t /= self._time_conv[self.units.lower()]
         if self.units == 'us':
             self.units = '$\\mu s$'
         s = self.format.format(time=t, units=self.units)
         plot._axes.hold(True)
-        plot._axes.text(self.x, self.y, s, **self.kwargs)
+        if self.normalized:
+            plot._axes.text(self.x, self.y, s, horizontalalignment='center',
+                            verticalalignment='center', 
+                            transform = plot._axes.transAxes, bbox=self.bbox_dict)
+        else:
+            plot._axes.text(self.x, self.y, s, bbox=self.bbox_dict, **self.kwargs)
         plot._axes.hold(False)
 
 


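Taken together, the callback changes above move per-artist styling into matplotlib passthroughs: streamlines forward plot_args straight to streamplot, and timestamps gain normalized axes coordinates plus a framing bbox. A hedged sketch of the new call patterns (slc is a placeholder SlicePlot and all keyword values are examples):

    # Assuming `slc` is an existing SlicePlot.
    slc.annotate_streamlines("x-velocity", "y-velocity", factor=16,
                             plot_args={"color": "black", "arrowsize": 2.0})
    slc.annotate_timestamp(0.5, 0.95, units="s", normalized=True,
                           bbox_dict={"boxstyle": "square,pad=0.6",
                                      "fc": "white", "ec": "black"})
    slc.save("callbacks_demo")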
diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -27,10 +27,20 @@
 import base64
 import matplotlib.figure
 from matplotlib.mathtext import MathTextParser
+from distutils import version
+import matplotlib
+
+# Some magic for dealing with pyparsing being included or not
+# included in matplotlib (not in gentoo, yes in everything else)
+# Also accounting for the fact that in 1.2.0, pyparsing got renamed.
 try:
-    from matplotlib.pyparsing import ParseFatalException
+    if version.LooseVersion(matplotlib.__version__) < version.LooseVersion("1.2.0"):
+        from matplotlib.pyparsing import ParseFatalException
+    else:
+        from matplotlib.pyparsing_py2 import ParseFatalException
 except ImportError:
     from pyparsing import ParseFatalException
+
 import cStringIO
 import types
 import __builtin__
@@ -60,6 +70,10 @@
     axis_labels
 from yt.utilities.math_utils import \
     ortho_find
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+    GroupOwnership
+from yt.data_objects.time_series import \
+    TimeSeriesData
 
 def invalidate_data(f):
     @wraps(f)
@@ -209,7 +223,7 @@
     _vector_info = None
     _frb = None
     def __init__(self, data_source, bounds, buff_size=(800,800), antialias=True, 
-                 periodic=True, origin='center-window', oblique=False):
+                 periodic=True, origin='center-window', oblique=False, fontsize=15):
         r"""
         PlotWindow(data_source, bounds, buff_size=(800,800), antialias = True)
         
@@ -239,6 +253,10 @@
             rendering is used during data deposition.
 
         """
+        if not hasattr(self, "pf"):
+            self.pf = data_source.pf
+            ts = self._initialize_dataset(self.pf) 
+            self.ts = ts
         self._initfinished = False
         self.center = None
         self.plots = {}
@@ -249,11 +267,40 @@
         self.antialias = True
         self.set_window(bounds) # this automatically updates the data and plot
         self.origin = origin
+        self.fontsize = fontsize
         if self.data_source.center is not None and oblique == False:
             center = [self.data_source.center[i] for i in range(len(self.data_source.center)) if i != self.data_source.axis]
             self.set_center(center)
         self._initfinished = True
 
+    def _initialize_dataset(self, ts):
+        if not isinstance(ts, TimeSeriesData):
+            if not iterable(ts): ts = [ts]
+            ts = TimeSeriesData(ts)
+        return ts
+
+    def __iter__(self):
+        for pf in self.ts:
+            mylog.warning("Switching to %s", pf)
+            self._switch_pf(pf)
+            yield self
+
+    def piter(self, *args, **kwargs):
+        for pf in self.ts.piter(*args, **kwargs):
+            self._switch_pf(pf)
+            yield self
+
+    def _switch_pf(self, new_pf):
+        ds = self.data_source
+        name = ds._type_name
+        kwargs = dict((n, getattr(ds, n)) for n in ds._con_args)
+        new_ds = getattr(new_pf.h, name)(**kwargs)
+        self.pf = new_pf
+        self.data_source = new_ds
+        self._data_valid = self._plot_valid = False
+        self._recreate_frb()
+        self._setup_plots()
+
     def __getitem__(self, item):
         return self.plots[item]
 
@@ -273,7 +320,6 @@
             self._frb._get_data_source_fields()
         else:
             for key in old_fields: self._frb[key]
-        self.pf = self._frb.pf
         self._data_valid = True
         
     def _setup_plots(self):
@@ -767,8 +813,10 @@
                 labels = [r'$\rm{Image\/x'+axes_unit_label+'}$',
                           r'$\rm{Image\/y'+axes_unit_label+'}$']
 
-            self.plots[f].axes.set_xlabel(labels[0])
-            self.plots[f].axes.set_ylabel(labels[1])
+            self.plots[f].axes.set_xlabel(labels[0],fontsize=self.fontsize)
+            self.plots[f].axes.set_ylabel(labels[1],fontsize=self.fontsize)
+
+            self.plots[f].axes.tick_params(labelsize=self.fontsize)
 
             field_name = self.data_source.pf.field_info[f].display_name
 
@@ -783,17 +831,18 @@
             except ParseFatalException, err:
                 raise YTCannotParseFieldDisplayName(f,field_name,str(err))
 
-            try:
-                parser.parse(r'$'+md['units']+r'$')
-            except ParseFatalException, err:
-                raise YTCannotParseUnitDisplayName(f, md['units'],str(err))
-
             if md['units'] == None or md['units'] == '':
                 label = field_name
             else:
+                try:
+                    parser.parse(r'$'+md['units']+r'$')
+                except ParseFatalException, err:
+                    raise YTCannotParseUnitDisplayName(f, md['units'],str(err))
                 label = field_name+r'$\/\/('+md['units']+r')$'
 
-            self.plots[f].cb.set_label(label)
+            self.plots[f].cb.set_label(label,fontsize=self.fontsize)
+
+            self.plots[f].cb.ax.tick_params(labelsize=self.fontsize)
 
             self.run_callbacks(f)
 
@@ -920,7 +969,7 @@
     _frb_generator = FixedResolutionBuffer
 
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
-                 origin='center-window'):
+                 origin='center-window', fontsize=15):
         r"""Creates a slice plot from a parameter file
         
         Given a pf object, an axis to slice along, and a field name
@@ -976,6 +1025,8 @@
              to the bottom-left hand corner of the simulation domain, 'center-domain',
              corresponding the center of the simulation domain, or 'center-window' for 
              the center of the plot window.
+        fontsize : integer
+             The size of the fonts for the axis, colorbar, and tick labels.
              
         Examples
         --------
@@ -987,8 +1038,12 @@
         >>> p.save('sliceplot')
         
         """
+        # This will handle time series data and controllers
+        ts = self._initialize_dataset(pf) 
+        self.ts = ts
+        pf = self.pf = ts[0]
         axis = fix_axis(axis)
-        (bounds,center) = GetBoundsAndCenter(axis, center, width, pf)
+        (bounds, center) = GetBoundsAndCenter(axis, center, width, pf)
         slc = pf.h.slice(axis, center[axis], fields=fields)
         PWViewerMPL.__init__(self, slc, bounds, origin=origin)
         self.set_axes_unit(axes_unit)
@@ -998,7 +1053,7 @@
     _frb_generator = FixedResolutionBuffer
 
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
-                 weight_field=None, max_level=None, origin='center-window'):
+                 weight_field=None, max_level=None, origin='center-window', fontsize=15):
         r"""Creates a projection plot from a parameter file
         
         Given a pf object, an axis to project along, and a field name
@@ -1058,6 +1113,8 @@
             The name of the weighting field.  Set to None for no weight.
         max_level: int
             The maximum level to project to.
+        fontsize : integer
+             The size of the fonts for the axis, colorbar, and tick labels.
         
         Examples
         --------
@@ -1069,8 +1126,11 @@
         >>> p.save('sliceplot')
         
         """
+        ts = self._initialize_dataset(pf) 
+        self.ts = ts
+        pf = self.pf = ts[0]
         axis = fix_axis(axis)
-        (bounds,center) = GetBoundsAndCenter(axis,center,width,pf)
+        (bounds, center) = GetBoundsAndCenter(axis, center, width, pf)
         proj = pf.h.proj(axis,fields,weight_field=weight_field,max_level=max_level,center=center)
         PWViewerMPL.__init__(self,proj,bounds,origin=origin)
         self.set_axes_unit(axes_unit)
@@ -1080,7 +1140,7 @@
     _frb_generator = ObliqueFixedResolutionBuffer
 
     def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
-                 axes_unit=None, north_vector=None):
+                 axes_unit=None, north_vector=None, fontsize=15):
         r"""Creates an off axis slice plot from a parameter file
 
         Given a pf object, a normal vector defining a slicing plane, and
@@ -1117,7 +1177,8 @@
             A vector defining the 'up' direction in the plot.  This
             option sets the orientation of the slicing plane.  If not
             set, an arbitrary grid-aligned north-vector is chosen.
-
+        fontsize : integer
+             The size of the fonts for the axis, colorbar, and tick labels.
         """
         (bounds,center_rot) = GetOffAxisBoundsAndCenter(normal,center,width,pf)
         cutting = pf.h.cutting(normal,center,fields=fields,north_vector=north_vector)
@@ -1156,7 +1217,7 @@
     def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
                  depth=(1,'unitary'), axes_unit=None, weight_field=None, 
                  max_level=None, north_vector=None, volume=None, no_ghost=False, 
-                 le=None, re=None, interpolated=False):
+                 le=None, re=None, interpolated=False, fontsize=15):
         r"""Creates an off axis projection plot from a parameter file
 
         Given a pf object, a normal vector to project along, and
@@ -1492,4 +1553,3 @@
                                       norm = norm, vmin = self.zmin, 
                                       vmax = self.zmax, cmap = cmap)
         self.image.axes.ticklabel_format(scilimits=(-4,3))
-


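The PlotWindow changes above teach a single plot object to walk a time series: the parameter file is wrapped in a TimeSeriesData, and _switch_pf rebuilds the data source for each output from its _con_args. A hedged sketch (the output glob and field are placeholders, and TimeSeriesData.from_filenames is assumed available):

    from yt.data_objects.time_series import TimeSeriesData

    ts = TimeSeriesData.from_filenames("DD????/DD????")
    slc = SlicePlot(ts, 0, "Density")   # the first output seeds the window
    for window in slc.piter():          # _switch_pf swaps outputs in place
        window.save()                   # one image per output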
diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/visualization/streamlines.py
--- a/yt/visualization/streamlines.py
+++ b/yt/visualization/streamlines.py
@@ -118,7 +118,9 @@
         if length is None:
             length = np.max(self.pf.domain_right_edge-self.pf.domain_left_edge)
         self.length = length
-        self.steps = int(length/dx)
+        self.steps = int(length/dx)+1
+        # Fix up the dx.
+        self.dx = 1.0*self.length/self.steps
         self.streamlines = np.zeros((self.N,self.steps,3), dtype='float64')
         self.magnitudes = None
         if self.get_magnitude:
@@ -206,5 +208,6 @@
         >>> matplotlib.pylab.semilogy(stream['t'], stream['Density'], '-x')
         
         """
-        return AMRStreamlineBase(self.streamlines[streamline_id], pf=self.pf)
+        return self.pf.h.streamline(self.streamlines[streamline_id],
+                                    length = self.length)
         


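Recomputing dx after rounding makes the integration steps tile the streamline length exactly: for length = 1.0 and an initial dx = 0.3, int(1.0/0.3) + 1 = 4 steps, and the fixed-up dx becomes 1.0/4 = 0.25, so four steps of 0.25 cover the full length with no short final segment.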

https://bitbucket.org/yt_analysis/yt-3.0/changeset/ab6c7184809e/
changeset:   ab6c7184809e
branch:      yt
user:        brittonsmith
date:        2012-11-13 04:40:55
summary:     Adding a verbose option to assert_rel_equal.
affected #:  2 files

diff -r d84dc871dfc9657a2b2f461cb7a6e0bad7508386 -r ab6c7184809ebe326070391b21ee8c6860eb3001 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -29,7 +29,7 @@
     assert_array_less, assert_string_equal, assert_array_almost_equal_nulp,\
     assert_allclose
 
-def assert_rel_equal(a1, a2, decimals, err_msg=''):
+def assert_rel_equal(a1, a2, decimals, err_msg='', verbose=True):
     # We have nan checks in here because occasionally we have fields that get
     # weighted without non-zero weights.  I'm looking at you, particle fields!
     if isinstance(a1, np.ndarray):
@@ -39,7 +39,8 @@
         a2[np.isnan(a2)] = 1.0
     elif np.isnan(a1) and np.isnan(a2):
         return True
-    return assert_almost_equal(a1/a2, 1.0, decimals, err_msg=err_msg)
+    return assert_almost_equal(a1/a2, 1.0, decimals, err_msg=err_msg,
+                               verbose=verbose)
 
 def amrspace(extent, levels=7, cells=8):
     """Creates two numpy arrays representing the left and right bounds of 


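For reference, the relative comparison operates on the ratio of the two inputs. A hedged usage sketch of the new keyword (values chosen to just pass at 3 decimals):

    import numpy as np
    from yt.testing import assert_rel_equal

    a1 = np.array([1.0000, 2.0])
    a2 = np.array([1.0004, 2.0])
    # Checks that a1/a2 rounds to 1.0 at 3 decimals; verbose=True makes a
    # failure print both arrays instead of just the error message.
    assert_rel_equal(a1, a2, 3, verbose=True)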
diff -r d84dc871dfc9657a2b2f461cb7a6e0bad7508386 -r ab6c7184809ebe326070391b21ee8c6860eb3001 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -310,7 +310,7 @@
                          err_msg=err_msg, verbose=True)
         else:
             assert_rel_equal(new_result, old_result, self.decimals,
-                             err_msg=err_msg)
+                             err_msg=err_msg, verbose=True)
 
 class AllFieldValuesTest(AnswerTestingTest):
     _type_name = "AllFieldValues"
@@ -334,7 +334,7 @@
                          err_msg=err_msg, verbose=True)
         else:
             assert_rel_equal(new_result, old_result, self.decimals,
-                             err_msg=err_msg)
+                             err_msg=err_msg, verbose=True)
             
 class ProjectionValuesTest(AnswerTestingTest):
     _type_name = "ProjectionValues"



https://bitbucket.org/yt_analysis/yt-3.0/changeset/8df4f0a5ed5f/
changeset:   8df4f0a5ed5f
branch:      yt
user:        brittonsmith
date:        2012-11-14 16:15:06
summary:     Tweaking the standard enzo answer tests.
affected #:  1 file

diff -r ab6c7184809ebe326070391b21ee8c6860eb3001 -r 8df4f0a5ed5fef39f09c0dda55224979a3ebc07b yt/frontends/enzo/answer_testing_support.py
--- a/yt/frontends/enzo/answer_testing_support.py
+++ b/yt/frontends/enzo/answer_testing_support.py
@@ -59,8 +59,6 @@
 def standard_small_simulation(pf_fn, fields):
     if not can_run_pf(pf_fn): return
     dso = [None]
-    yield GridHierarchyTest(pf_fn)
-    yield ParentageRelationshipsTest(pf_fn)
     for field in fields:
         yield GridValuesTest(pf_fn, field)
         if 'particle' in field: continue
@@ -69,9 +67,9 @@
                 for weight_field in [None, "Density"]:
                     yield ProjectionValuesTest(
                         pf_fn, axis, field, weight_field,
-                        ds)
+                        ds, decimals=3)
                 yield FieldValuesTest(
-                        pf_fn, field, ds)
+                        pf_fn, field, ds, decimals=3)
                     
 class ShockTubeTest(object):
     def __init__(self, data_file, solution_file, fields, 



https://bitbucket.org/yt_analysis/yt-3.0/changeset/6021af872bc0/
changeset:   6021af872bc0
branch:      yt
user:        brittonsmith
date:        2012-11-14 16:15:53
summary:     Merged.
affected #:  2 files

diff -r 8df4f0a5ed5fef39f09c0dda55224979a3ebc07b -r 6021af872bc06755516426d6fc8f3acc8e49c81d yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -274,6 +274,7 @@
             self.conversion_factors["Time"] = 1.0
         for unit in mpc_conversion.keys():
             self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
+            self.units[unit+"cm"] = self.units[unit]
             self.units[unit] /= (1.0+self.current_redshift)
             
     def _setup_cgs_units(self):


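The new unit+"cm" entries preserve the comoving conversion factors; the bare unit names are then divided by (1 + z) on the following line, turning them into proper-distance conversions while the "cm" variants stay comoving.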
diff -r 8df4f0a5ed5fef39f09c0dda55224979a3ebc07b -r 6021af872bc06755516426d6fc8f3acc8e49c81d yt/utilities/physical_constants.py
--- a/yt/utilities/physical_constants.py
+++ b/yt/utilities/physical_constants.py
@@ -20,6 +20,7 @@
 boltzmann_constant_cgs = 1.3806504e-16  # erg K^-1
 gravitational_constant_cgs  = 6.67428e-8  # cm^3 g^-1 s^-2
 planck_constant_cgs   = 6.62606896e-27  # erg s
+stefan_boltzmann_constant_cgs = 5.670373e-5 # erg cm^-2 s^-1 K^-4
 rho_crit_now = 1.8788e-29  # g times h^2 (critical mass for closure, Cosmology)
 
 # Misc. Approximations


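A quick sanity check on the new constant via the blackbody flux relation F = sigma * T^4 (the temperature is just an example value):

    from yt.utilities.physical_constants import stefan_boltzmann_constant_cgs

    T = 5778.0                                # K, roughly the solar photosphere
    flux = stefan_boltzmann_constant_cgs * T**4
    print("%.2e erg cm^-2 s^-1" % flux)       # ~6.32e+10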

https://bitbucket.org/yt_analysis/yt-3.0/changeset/e2d94c653c5a/
changeset:   e2d94c653c5a
branch:      yt
user:        samskillman
date:        2012-11-14 19:18:30
summary:     Only test the FieldValues once, not once per projection direction. Swap out
assert_rel_equal for numpy.testing.assert_allclose, which is more forgiving when comparing zeros.
affected #:  2 files

diff -r 6021af872bc06755516426d6fc8f3acc8e49c81d -r e2d94c653c5ac243017f361ff4d20fb1c0522e57 yt/frontends/enzo/answer_testing_support.py
--- a/yt/frontends/enzo/answer_testing_support.py
+++ b/yt/frontends/enzo/answer_testing_support.py
@@ -62,14 +62,14 @@
     for field in fields:
         yield GridValuesTest(pf_fn, field)
         if 'particle' in field: continue
-        for axis in [0, 1, 2]:
-            for ds in dso:
+        for ds in dso:
+            for axis in [0, 1, 2]:
                 for weight_field in [None, "Density"]:
                     yield ProjectionValuesTest(
                         pf_fn, axis, field, weight_field,
                         ds, decimals=3)
-                yield FieldValuesTest(
-                        pf_fn, field, ds, decimals=3)
+            yield FieldValuesTest(
+                    pf_fn, field, ds, decimals=3)
                     
 class ShockTubeTest(object):
     def __init__(self, data_file, solution_file, fields, 


diff -r 6021af872bc06755516426d6fc8f3acc8e49c81d -r e2d94c653c5ac243017f361ff4d20fb1c0522e57 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -309,7 +309,7 @@
             assert_equal(new_result, old_result, 
                          err_msg=err_msg, verbose=True)
         else:
-            assert_rel_equal(new_result, old_result, self.decimals,
+            assert_allclose(new_result, old_result, 10.**(-self.decimals),
                              err_msg=err_msg, verbose=True)
 
 class AllFieldValuesTest(AnswerTestingTest):
@@ -370,8 +370,8 @@
                 assert_equal(new_result[k], old_result[k],
                              err_msg=err_msg)
             else:
-                assert_rel_equal(new_result[k], old_result[k], 
-                                 self.decimals, err_msg=err_msg)
+                assert_allclose(new_result[k], old_result[k], 
+                                 10.**(-self.decimals), err_msg=err_msg)
 
 class PixelizedProjectionValuesTest(AnswerTestingTest):
     _type_name = "PixelizedProjectionValues"


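The tolerance translation is direct: decimals = 3 becomes rtol = 10.**(-3) = 0.001, i.e. agreement to 0.1%. Unlike the ratio-based assert_rel_equal, assert_allclose compares |actual - desired| against rtol * |desired|, so two exact zeros satisfy the test instead of producing nan in a division.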

https://bitbucket.org/yt_analysis/yt-3.0/changeset/b0dcdb34be9e/
changeset:   b0dcdb34be9e
branch:      yt
user:        ngoldbaum
date:        2012-11-14 21:09:58
summary:     Fixing an error in grid_traversal.pyx
affected #:  1 file

diff -r e2d94c653c5ac243017f361ff4d20fb1c0522e57 -r b0dcdb34be9ee1eebdab4008ffdea8ca39e58f48 yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -989,9 +989,9 @@
             tmax[i] = 1e60
     # We have to jumpstart our calculation
     for i in range(3):
-        if cur_ind[i] == vc.dims[i] and step[i] == 1:
+        if cur_ind[i] == vc.dims[i] and step[i] >= 0:
             return 0
-        if cur_ind[i] == -1 and step[i] == -1:
+        if cur_ind[i] == -1 and step[i] <= -1:
             return 0
     enter_t = intersect_t
     hit = 0



https://bitbucket.org/yt_analysis/yt-3.0/changeset/5e0edcf60e2b/
changeset:   5e0edcf60e2b
branch:      yt
user:        ngoldbaum
date:        2012-09-26 23:14:25
summary:     Merging.
affected #:  22 files

diff -r 79743892acbc5d49bbc5067db8077e349a745e1f -r 5e0edcf60e2b0d1f073478f0d5ec89405310ee8f yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -133,6 +133,7 @@
         else:
             exec(operation)
 
+        if self.children is None: return
         for child in self.children:
             child.pass_down(operation)
 


diff -r 79743892acbc5d49bbc5067db8077e349a745e1f -r 5e0edcf60e2b0d1f073478f0d5ec89405310ee8f yt/frontends/_skeleton/__init__.py
--- /dev/null
+++ b/yt/frontends/_skeleton/__init__.py
@@ -0,0 +1,25 @@
+"""
+API for yt.frontends.skeleton
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""


diff -r 79743892acbc5d49bbc5067db8077e349a745e1f -r 5e0edcf60e2b0d1f073478f0d5ec89405310ee8f yt/frontends/_skeleton/api.py
--- /dev/null
+++ b/yt/frontends/_skeleton/api.py
@@ -0,0 +1,37 @@
+"""
+API for yt.frontends._skeleton
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+
+from .data_structures import \
+      SkeletonGrid, \
+      SkeletonHierarchy, \
+      SkeletonStaticOutput
+
+from .fields import \
+      SkeletonFieldInfo, \
+      add_skeleton_field
+
+from .io import \
+      IOHandlerSkeleton


diff -r 79743892acbc5d49bbc5067db8077e349a745e1f -r 5e0edcf60e2b0d1f073478f0d5ec89405310ee8f yt/frontends/_skeleton/data_structures.py
--- /dev/null
+++ b/yt/frontends/_skeleton/data_structures.py
@@ -0,0 +1,157 @@
+"""
+Skeleton data structures
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import h5py
+import stat
+import numpy as np
+import weakref
+
+from yt.funcs import *
+from yt.data_objects.grid_patch import \
+    AMRGridPatch
+from yt.data_objects.hierarchy import \
+    AMRHierarchy
+from yt.data_objects.static_output import \
+    StaticOutput
+from yt.utilities.definitions import \
+    mpc_conversion, sec_conversion
+from yt.utilities.io_handler import \
+    io_registry
+from yt.utilities.physical_constants import cm_per_mpc
+from .fields import SkeletonFieldInfo, add_skeleton_field, KnownSkeletonFields
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc, ValidateDataField, TranslationFunc
+
+class SkeletonGrid(AMRGridPatch):
+    _id_offset = 0
+    #__slots__ = ["_level_id", "stop_index"]
+    def __init__(self, id, hierarchy, level):
+        AMRGridPatch.__init__(self, id, filename = hierarchy.hierarchy_filename,
+                              hierarchy = hierarchy)
+        self.Parent = None
+        self.Children = []
+        self.Level = level
+
+    def __repr__(self):
+        return "SkeletonGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
+
+class SkeletonHierarchy(AMRHierarchy):
+
+    grid = SkeletonGrid
+    float_type = np.float64
+    
+    def __init__(self, pf, data_style='skeleton'):
+        self.data_style = data_style
+        self.parameter_file = weakref.proxy(pf)
+        # for now, the hierarchy file is the parameter file!
+        self.hierarchy_filename = self.parameter_file.parameter_filename
+        self.directory = os.path.dirname(self.hierarchy_filename)
+        AMRHierarchy.__init__(self, pf, data_style)
+
+    def _initialize_data_storage(self):
+        pass
+
+    def _detect_fields(self):
+        # This needs to set a self.field_list that contains all the available,
+        # on-disk fields.
+        pass
+    
+    def _count_grids(self):
+        # This needs to set self.num_grids
+        pass
+        
+    def _parse_hierarchy(self):
+        # This needs to fill the following arrays, where N is self.num_grids:
+        #   self.grid_left_edge         (N, 3) <= float64
+        #   self.grid_right_edge        (N, 3) <= float64
+        #   self.grid_dimensions        (N, 3) <= int
+        #   self.grid_particle_count    (N, 1) <= int
+        #   self.grid_levels            (N, 1) <= int
+        #   self.grids                  (N, 1) <= grid objects
+        #   
+        pass
+                        
+    def _populate_grid_objects(self):
+        # For each grid, this must call:
+        #   grid._prepare_grid()
+        #   grid._setup_dx()
+        # This must also set:
+        #   grid.Children <= list of child grids
+        #   grid.Parent   <= parent grid
+        # This is handled by the frontend because often the children must be
+        # identified.
+        pass
+
+class SkeletonStaticOutput(StaticOutput):
+    _hierarchy_class = SkeletonHierarchy
+    _fieldinfo_fallback = SkeletonFieldInfo
+    _fieldinfo_known = KnownSkeletonFields
+    _handle = None
+    
+    def __init__(self, filename, data_style='skeleton',
+                 storage_filename = None,
+                 conversion_override = None):
+
+        if conversion_override is None: conversion_override = {}
+        self._conversion_override = conversion_override
+
+        StaticOutput.__init__(self, filename, data_style)
+        self.storage_filename = storage_filename
+
+    def _set_units(self):
+        # This needs to set up the dictionaries that convert from code units to
+        # CGS.  The needed items are listed in the second entry:
+        #   self.time_units         <= sec_conversion
+        #   self.conversion_factors <= mpc_conversion
+        #   self.units              <= On-disk fields
+        pass
+
+    def _parse_parameter_file(self):
+        # This needs to set up the following items:
+        #
+        #   self.unique_identifier
+        #   self.parameters             <= full of code-specific items of use
+        #   self.domain_left_edge       <= array of float64
+        #   self.domain_right_edge      <= array of float64
+        #   self.dimensionality         <= int
+        #   self.domain_dimensions      <= array of int64
+        #   self.current_time           <= simulation time in code units
+        #
+        # We also set up cosmological information.  Set these to zero if
+        # non-cosmological.
+        #
+        #   self.cosmological_simulation    <= int, 0 or 1
+        #   self.current_redshift           <= float
+        #   self.omega_lambda               <= float
+        #   self.omega_matter               <= float
+        #   self.hubble_constant            <= float
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        # This accepts a filename or a set of arguments and returns True or
+        # False depending on if the file is of the type requested.
+        return False
+
+



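As a concrete (hypothetical) instance of what _parse_hierarchy must fill in, a frontend exposing a single 64^3 level-0 grid on the unit cube might look like the sketch below, assuming _count_grids has already set self.num_grids = 1; the array shapes match the comments above:

    import numpy as np

    def _parse_hierarchy(self):
        # Hypothetical single-grid dataset: one level-0 grid, unit cube.
        self.grid_left_edge = np.zeros((self.num_grids, 3), dtype="float64")
        self.grid_right_edge = np.ones((self.num_grids, 3), dtype="float64")
        self.grid_dimensions = np.array([[64, 64, 64]], dtype="int32")
        self.grid_particle_count = np.zeros((self.num_grids, 1), dtype="int32")
        self.grid_levels = np.zeros((self.num_grids, 1), dtype="int32")
        self.grids = np.array([self.grid(0, self, level=0)], dtype="object")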

diff -r 79743892acbc5d49bbc5067db8077e349a745e1f -r 5e0edcf60e2b0d1f073478f0d5ec89405310ee8f yt/frontends/_skeleton/fields.py
--- /dev/null
+++ b/yt/frontends/_skeleton/fields.py
@@ -0,0 +1,102 @@
+"""
+Skeleton-specific fields
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, \
+    NullFunc, \
+    TranslationFunc, \
+    FieldInfo, \
+    ValidateParameter, \
+    ValidateDataField, \
+    ValidateProperty, \
+    ValidateSpatial, \
+    ValidateGridType
+import yt.data_objects.universal_fields
+from yt.utilities.physical_constants import \
+    kboltz
+
+# The first field container is where any fields that exist on disk go, along
+# with their conversion factors, display names, etc.
+
+KnownSkeletonFields = FieldInfoContainer()
+add_skeleton_field = KnownSkeletonFields.add_field
+
+SkeletonFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = SkeletonFieldInfo.add_field
+
+# Often, we want to translate between fields on disk and fields in yt.  This
+# construct shows how to do that.  Note that we use TranslationFunc.
+
+translation_dict = {"x-velocity": "velx",
+                    "y-velocity": "vely",
+                    "z-velocity": "velz",
+                    "Density": "dens",
+                    "Temperature": "temp",
+                    "Pressure" : "pres", 
+                    "Grav_Potential" : "gpot",
+                    "particle_position_x" : "particle_posx",
+                    "particle_position_y" : "particle_posy",
+                    "particle_position_z" : "particle_posz",
+                    "particle_velocity_x" : "particle_velx",
+                    "particle_velocity_y" : "particle_vely",
+                    "particle_velocity_z" : "particle_velz",
+                    "particle_index" : "particle_tag",
+                    "Electron_Fraction" : "elec",
+                    "HI_Fraction" : "h   ",
+                    "HD_Fraction" : "hd  ",
+                    "HeI_Fraction": "hel ",
+                    "HeII_Fraction": "hep ",
+                    "HeIII_Fraction": "hepp",
+                    "HM_Fraction": "hmin",
+                    "HII_Fraction": "hp  ",
+                    "H2I_Fraction": "htwo",
+                    "H2II_Fraction": "htwp",
+                    "DI_Fraction": "deut",
+                    "DII_Fraction": "dplu",
+                    "ParticleMass": "particle_mass",
+                    "Flame_Fraction": "flam"}
+
+for f,v in translation_dict.items():
+    if v not in KnownSkeletonFields:
+        pfield = v.startswith("particle")
+        add_skeleton_field(v, function=NullFunc, take_log=False,
+                  validators = [ValidateDataField(v)],
+                  particle_type = pfield)
+    if f.endswith("_Fraction") :
+        dname = "%s\/Fraction" % f.split("_")[0]
+    else :
+        dname = f                    
+    ff = KnownSkeletonFields[v]
+    pfield = f.startswith("particle")
+    add_field(f, TranslationFunc(v),
+              take_log=KnownSkeletonFields[v].take_log,
+              units = ff._units, display_name=dname,
+              particle_type = pfield)
+
+# Here's an example of adding a new field:
+
+add_skeleton_field("dens", function=NullFunc, take_log=True,
+                convert_function=_get_convert("dens"),
+                units=r"\rm{g}/\rm{cm}^3")


diff -r 79743892acbc5d49bbc5067db8077e349a745e1f -r 5e0edcf60e2b0d1f073478f0d5ec89405310ee8f yt/frontends/_skeleton/io.py
--- /dev/null
+++ b/yt/frontends/_skeleton/io.py
@@ -0,0 +1,44 @@
+"""
+Skeleton-specific IO functions
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+import h5py
+
+from yt.utilities.io_handler import \
+    BaseIOHandler
+
+class IOHandlerSkeleton(BaseIOHandler):
+    _particle_reader = False
+    _data_style = "skeleton"
+
+    def _read_data_set(self, grid, field):
+        # This must return the array, of size/shape grid.ActiveDimensions, that
+        # corresponds to 'field'.
+        pass
+
+    def _read_data_slice(self, grid, field, axis, coord):
+        # If this is not implemented, the IO handler will just slice a
+        # _read_data_set item.
+        pass




diff -r 79743892acbc5d49bbc5067db8077e349a745e1f -r 5e0edcf60e2b0d1f073478f0d5ec89405310ee8f yt/frontends/_skeleton/setup.py
--- /dev/null
+++ b/yt/frontends/_skeleton/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('skeleton', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config


diff -r 79743892acbc5d49bbc5067db8077e349a745e1f -r 5e0edcf60e2b0d1f073478f0d5ec89405310ee8f yt/frontends/castro/data_structures.py
--- a/yt/frontends/castro/data_structures.py
+++ b/yt/frontends/castro/data_structures.py
@@ -101,18 +101,11 @@
             self.Parent = None
 
     def _setup_dx(self):
-        # So first we figure out what the index is.  We don't assume
-        # that dx=dy=dz , at least here.  We probably do elsewhere.
-        id = self.id - self._id_offset
-        if self.Parent is not None:
-            self.dds = self.Parent[0].dds / self.pf.refine_by
-        else:
-            LE, RE = self.hierarchy.grid_left_edge[id,:], \
-                     self.hierarchy.grid_right_edge[id,:]
-            self.dds = np.array((RE-LE)/self.ActiveDimensions)
-
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        # has already been read in and stored in hierarchy
+        dx = self.hierarchy.grid_dxs[self.index][0]
+        dy = self.hierarchy.grid_dys[self.index][0]
+        dz = self.hierarchy.grid_dzs[self.index][0]
+        self.dds = np.array([dx, dy, dz])
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def __repr__(self):


diff -r 79743892acbc5d49bbc5067db8077e349a745e1f -r 5e0edcf60e2b0d1f073478f0d5ec89405310ee8f yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -89,17 +89,8 @@
         return self.start_index
 
     def _setup_dx(self):
-        # So first we figure out what the index is.  We don't assume
-        # that dx=dy=dz , at least here.  We probably do elsewhere.
-        id = self.id - self._id_offset
-        if len(self.Parent) > 0:
-            self.dds = self.Parent[0].dds / self.pf.refine_by
-        else:
-            LE, RE = self.hierarchy.grid_left_edge[id,:], \
-                     self.hierarchy.grid_right_edge[id,:]
-            self.dds = np.array((RE-LE)/self.ActiveDimensions)
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        # has already been read in and stored in hierarchy
+        self.dds = self.hierarchy.dds_list[self.Level]
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
 class ChomboHierarchy(AMRHierarchy):
@@ -176,11 +167,13 @@
         # 'Chombo_global'
         levels = f.keys()[1:]
         grids = []
+        self.dds_list = []
         i = 0
         for lev in levels:
             level_number = int(re.match('level_(\d+)',lev).groups()[0])
             boxes = f[lev]['boxes'].value
             dx = f[lev].attrs['dx']
+            self.dds_list.append(dx * np.ones(3))
             for level_id, box in enumerate(boxes):
                 si = np.array([box['lo_%s' % ax] for ax in 'ijk'])
                 ei = np.array([box['hi_%s' % ax] for ax in 'ijk'])


diff -r 79743892acbc5d49bbc5067db8077e349a745e1f -r 5e0edcf60e2b0d1f073478f0d5ec89405310ee8f yt/frontends/maestro/data_structures.py
--- a/yt/frontends/maestro/data_structures.py
+++ b/yt/frontends/maestro/data_structures.py
@@ -102,17 +102,11 @@
             self.Parent = None
 
     def _setup_dx(self):
-        # So first we figure out what the index is.  We don't assume
-        # that dx=dy=dz , at least here.  We probably do elsewhere.
-        id = self.id - self._id_offset
-        if self.Parent is not None:
-            self.dds = self.Parent[0].dds / self.pf.refine_by
-        else:
-            LE, RE = self.hierarchy.grid_left_edge[id,:], \
-                     self.hierarchy.grid_right_edge[id,:]
-            self.dds = np.array((RE-LE)/self.ActiveDimensions)
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        # has already been read in and stored in hierarchy
+        dx = self.hierarchy.grid_dxs[self.index][0]
+        dy = self.hierarchy.grid_dys[self.index][0]
+        dz = self.hierarchy.grid_dzs[self.index][0]
+        self.dds = np.array([dx, dy, dz])
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def __repr__(self):


diff -r 79743892acbc5d49bbc5067db8077e349a745e1f -r 5e0edcf60e2b0d1f073478f0d5ec89405310ee8f yt/frontends/nyx/data_structures.py
--- a/yt/frontends/nyx/data_structures.py
+++ b/yt/frontends/nyx/data_structures.py
@@ -100,18 +100,11 @@
             self.Parent = None
 
     def _setup_dx(self):
-        # So first we figure out what the index is. We don't assume that
-        # dx=dy=dz here.
-        id = self.id - self._id_offset
-        if self.Parent is not None:
-            self.dds = self.Parent[0].dds / self.pf.refine_by
-        else:
-            LE, RE = self.hierarchy.grid_left_edge[id,:], \
-                     self.hierarchy.grid_right_edge[id,:]
-            self.dds = np.array((RE - LE) / self.ActiveDimensions)
-
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        # has already been read in and stored in hierarchy
+        dx = self.hierarchy.grid_dxs[self.index][0]
+        dy = self.hierarchy.grid_dys[self.index][0]
+        dz = self.hierarchy.grid_dzs[self.index][0]
+        self.dds = np.array([dx, dy, dz])
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def __repr__(self):


diff -r 79743892acbc5d49bbc5067db8077e349a745e1f -r 5e0edcf60e2b0d1f073478f0d5ec89405310ee8f yt/frontends/orion/data_structures.py
--- a/yt/frontends/orion/data_structures.py
+++ b/yt/frontends/orion/data_structures.py
@@ -99,17 +99,11 @@
             self.Parent = None
 
     def _setup_dx(self):
-        # So first we figure out what the index is.  We don't assume
-        # that dx=dy=dz , at least here.  We probably do elsewhere.
-        id = self.id - self._id_offset
-        if self.Parent is not None:
-            self.dds = self.Parent[0].dds / self.pf.refine_by
-        else:
-            LE, RE = self.hierarchy.grid_left_edge[id,:], \
-                     self.hierarchy.grid_right_edge[id,:]
-            self.dds = np.array((RE-LE)/self.ActiveDimensions)
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        # has already been read in and stored in hierarchy
+        dx = self.hierarchy.grid_dxs[self.index][0]
+        dy = self.hierarchy.grid_dys[self.index][0]
+        dz = self.hierarchy.grid_dzs[self.index][0]
+        self.dds = np.array([dx, dy, dz])
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def __repr__(self):


diff -r 79743892acbc5d49bbc5067db8077e349a745e1f -r 5e0edcf60e2b0d1f073478f0d5ec89405310ee8f yt/testing.py
--- /dev/null
+++ b/yt/testing.py
@@ -0,0 +1,143 @@
+"""Provides utility and helper functions for testing in yt.
+
+Author: Anthony Scopatz <scopatz at gmail.com>
+Affiliation: The University of Chicago
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Anthony Scopatz.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+from yt.funcs import *
+from numpy.testing import assert_array_equal
+
+def amrspace(extent, levels=7, cells=8):
+    """Creates two numpy arrays representing the left and right bounds of 
+    an AMR grid as well as an array for the AMR level of each cell.
+
+    Parameters
+    ----------
+    extent : array-like
+        This a sequence of length 2*ndims that is the bounds of each dimension.
+        For example, the 2D unit square would be given by [0.0, 1.0, 0.0, 1.0].
+        A 3D cylindrical grid may look like [0.0, 2.0, -1.0, 1.0, 0.0, 2*np.pi].
+    levels : int or sequence of ints, optional
+        This is the number of AMR refinement levels.  If given as a sequence (of
+        length ndims), then each dimension will be refined down to this level.
+        All values in this array must be the same or zero.  A zero valued dimension
+        indicates that this dim should not be refined.  Taking the 3D cylindrical
+        example above if we don't want refine theta but want r and z at 5 we would 
+        set levels=(5, 5, 0).
+    cells : int, optional
+        This is the number of cells per refinement level.
+
+    Returns
+    -------
+    left : float ndarray, shape=(npoints, ndims)
+        The left AMR grid points.
+    right : float ndarray, shape=(npoints, ndims)
+        The right AMR grid points.
+    level : int ndarray, shape=(npoints,)
+        The AMR level for each point.
+
+    Examples
+    --------
+    >>> l, r, lvl = amrspace([0.0, 2.0, 1.0, 2.0, 0.0, 3.14], levels=(3,3,0), cells=2)
+    >>> print l
+    [[ 0.     1.     0.   ]
+     [ 0.25   1.     0.   ]
+     [ 0.     1.125  0.   ]
+     [ 0.25   1.125  0.   ]
+     [ 0.5    1.     0.   ]
+     [ 0.     1.25   0.   ]
+     [ 0.5    1.25   0.   ]
+     [ 1.     1.     0.   ]
+     [ 0.     1.5    0.   ]
+     [ 1.     1.5    0.   ]]
+
+    """
+    extent = np.asarray(extent, dtype='f8')
+    dextent = extent[1::2] - extent[::2]
+    ndims = len(dextent)
+
+    if isinstance(levels, int):
+        minlvl = maxlvl = levels
+        levels = np.array([levels]*ndims, dtype='int32')
+    else:
+        levels = np.asarray(levels, dtype='int32')
+        minlvl = levels.min()
+        maxlvl = levels.max()
+        if minlvl != maxlvl and (minlvl != 0 or set([minlvl, maxlvl]) != set(levels)):
+            raise ValueError("all levels must have the same value or zero.")
+    dims_zero = (levels == 0)
+    dims_nonzero = ~dims_zero
+    ndims_nonzero = dims_nonzero.sum()
+
+    npoints = (cells**ndims_nonzero - 1)*maxlvl + 1
+    left = np.empty((npoints, ndims), dtype='float64')
+    right = np.empty((npoints, ndims), dtype='float64')
+    level = np.empty(npoints, dtype='int32')
+
+    # fill zero dims
+    left[:,dims_zero] = extent[::2][dims_zero]
+    right[:,dims_zero] = extent[1::2][dims_zero]
+
+    # fill non-zero dims
+    dcell = 1.0 / cells
+    left_slice =  tuple([slice(extent[2*n], extent[2*n+1], extent[2*n+1]) if \
+        dims_zero[n] else slice(0.0,1.0,dcell) for n in range(ndims)])
+    right_slice = tuple([slice(extent[2*n+1], extent[2*n], -extent[2*n+1]) if \
+        dims_zero[n] else slice(dcell,1.0+dcell,dcell) for n in range(ndims)])
+    left_norm_grid = np.reshape(np.mgrid[left_slice].T.flat[ndims:], (-1, ndims))
+    lng_zero = left_norm_grid[:,dims_zero]
+    lng_nonzero = left_norm_grid[:,dims_nonzero]
+
+    right_norm_grid = np.reshape(np.mgrid[right_slice].T.flat[ndims:], (-1, ndims))
+    rng_zero = right_norm_grid[:,dims_zero]
+    rng_nonzero = right_norm_grid[:,dims_nonzero]
+
+    level[0] = maxlvl
+    left[0,:] = extent[::2]
+    right[0,dims_zero] = extent[1::2][dims_zero]
+    right[0,dims_nonzero] = (dcell**maxlvl)*dextent[dims_nonzero] + extent[::2][dims_nonzero]
+    for i, lvl in enumerate(range(maxlvl, 0, -1)):
+        start = (cells**ndims_nonzero - 1)*i + 1
+        stop = (cells**ndims_nonzero - 1)*(i+1) + 1
+        dsize = dcell**(lvl-1) * dextent[dims_nonzero]
+        level[start:stop] = lvl
+        left[start:stop,dims_zero] = lng_zero
+        left[start:stop,dims_nonzero] = lng_nonzero*dsize + extent[::2][dims_nonzero]
+        right[start:stop,dims_zero] = rng_zero
+        right[start:stop,dims_nonzero] = rng_nonzero*dsize + extent[::2][dims_nonzero]
+
+    return left, right, level
+
+def fake_random_pf(ndims, peak_value = 1.0, fields = ("Density",), negative = False):
+    from yt.frontends.stream.api import load_uniform_grid
+    if not iterable(ndims):
+        ndims = [ndims, ndims, ndims]
+    else:
+        assert(len(ndims) == 3)
+    if negative:
+        offset = 0.5
+    else:
+        offset = 0.0
+    data = dict((field, (np.random.random(ndims) - offset) * peak_value)
+                 for field in fields)
+    ug = load_uniform_grid(data, ndims, 1.0)
+    return ug
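
A minimal usage sketch for this helper (values illustrative; assumes the
stream frontend is importable, as in the tests further down):

    from yt.testing import fake_random_pf
    # a 64^3 uniform random dataset with values in [0, 1)
    pf = fake_random_pf(64)
    # anisotropic dims; signed values in [-0.5, 0.5) scaled by peak_value
    pf2 = fake_random_pf([32, 32, 16], peak_value=2.0, negative=True)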


diff -r 79743892acbc5d49bbc5067db8077e349a745e1f -r 5e0edcf60e2b0d1f073478f0d5ec89405310ee8f yt/utilities/flagging_methods.py
--- /dev/null
+++ b/yt/utilities/flagging_methods.py
@@ -0,0 +1,51 @@
+"""
+Utilities for flagging zones for refinement in a dataset
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np # For modern purposes
+
+flagging_method_registry = {}
+
+def flag_cells(grid, methods):
+    flagged = np.zeros(grid.ActiveDimensions, dtype="bool")
+    for method in methods:
+        flagged |= method(grid)
+    return flagged
+
+class FlaggingMethod(object):
+    _skip_add = False
+    class __metaclass__(type):
+        def __init__(cls, name, b, d):
+            type.__init__(cls, name, b, d)
+            if hasattr(cls, "_type_name") and not cls._skip_add:
+                flagging_method_registry[cls._type_name] = cls
+
+class OverDensity(FlaggingMethod):
+    _type_name = "overdensity"
+    def __init__(self, over_density):
+        self.over_density = over_density
+
+    def __call__(self, pf, grid):
+        rho = grid["Density"] / (pf.refine_by**grid.Level)
+        return (rho > self.over_density)
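
Because the metaclass registers any subclass that defines a _type_name, a
user-supplied criterion needs no explicit registration call.  A hedged
sketch (the "temperature_cut" name and field are invented for illustration;
note that flag_cells above still calls method(grid) with a single argument,
so the two call conventions in this file do not yet agree):

    from yt.utilities.flagging_methods import \
        FlaggingMethod, flagging_method_registry

    class TemperatureCut(FlaggingMethod):
        _type_name = "temperature_cut"  # hypothetical
        def __init__(self, temp_min):
            self.temp_min = temp_min
        def __call__(self, pf, grid):   # same signature as OverDensity
            return (grid["Temperature"] > self.temp_min)

    # the metaclass has already added the class to the registry:
    assert flagging_method_registry["temperature_cut"] is TemperatureCut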


diff -r 79743892acbc5d49bbc5067db8077e349a745e1f -r 5e0edcf60e2b0d1f073478f0d5ec89405310ee8f yt/utilities/lib/fortran_reader.pyx
--- a/yt/utilities/lib/fortran_reader.pyx
+++ b/yt/utilities/lib/fortran_reader.pyx
@@ -53,8 +53,8 @@
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
-def read_and_seek(char *filename, int offset1, int offset2,
-                  np.ndarray buffer, int bytes):
+def read_and_seek(char *filename, np.int64_t offset1,
+                  np.int64_t offset2, np.ndarray buffer, int bytes):
     cdef FILE *f = fopen(filename, "rb")
     cdef void *buf = <void *> buffer.data
     cdef char line[1024]
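
The widened offset types matter once the file being seeked into exceeds
2 GiB, where a C int would overflow; a quick illustration of the boundary:

    import numpy as np
    offset = np.int64(3) * 1024**3          # 3 GiB into the file
    assert offset > np.iinfo(np.int32).max  # would have wrapped as a C int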


diff -r 79743892acbc5d49bbc5067db8077e349a745e1f -r 5e0edcf60e2b0d1f073478f0d5ec89405310ee8f yt/utilities/orientation.py
--- a/yt/utilities/orientation.py
+++ b/yt/utilities/orientation.py
@@ -67,6 +67,8 @@
             t = np.cross(normal_vector, vecs).sum(axis=1)
             ax = t.argmax()
             east_vector = np.cross(vecs[ax,:], normal_vector).ravel()
+            # self.north_vector must remain None, otherwise rotations about a
+            # fixed axis will break.  The north_vector calculated here will
+            # still be included in self.unit_vectors.
             north_vector = np.cross(normal_vector, east_vector).ravel()
         else:
             if self.steady_north:
@@ -82,7 +84,7 @@
         r"""Change the view direction based on any of the orientation parameters.
 
         This will recalculate all the necessary vectors and vector planes related
-        to a an orientable object.
+        to an orientable object.
 
         Parameters
         ----------
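
For reference, the basis construction the new comment refers to, in plain
NumPy (unit normalization added here for illustration):

    import numpy as np
    normal = np.array([1.0, 1.0, 0.0])
    normal /= np.sqrt((normal ** 2).sum())
    # pick the coordinate axis most orthogonal to normal, then build
    # east = axis x normal and north = normal x east
    vecs = np.identity(3)
    ax = np.cross(normal, vecs).sum(axis=1).argmax()
    east = np.cross(vecs[ax, :], normal)
    east /= np.sqrt((east ** 2).sum())
    north = np.cross(normal, east)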


diff -r 79743892acbc5d49bbc5067db8077e349a745e1f -r 5e0edcf60e2b0d1f073478f0d5ec89405310ee8f yt/utilities/tests/test_flagging_methods.py
--- /dev/null
+++ b/yt/utilities/tests/test_flagging_methods.py
@@ -0,0 +1,12 @@
+from yt.testing import *
+from yt.utilities.flagging_methods import flagging_method_registry
+
+def setup():
+    global pf
+    pf = fake_random_pf(64)
+    pf.h
+
+def test_over_density():
+    od_flag = flagging_method_registry["overdensity"](0.75) 
+    criterion = (pf.h.grids[0]["Density"] > 0.75)
+    assert( np.all( od_flag(pf, pf.h.grids[0]) == criterion) )


diff -r 79743892acbc5d49bbc5067db8077e349a745e1f -r 5e0edcf60e2b0d1f073478f0d5ec89405310ee8f yt/utilities/tests/test_interpolators.py
--- /dev/null
+++ b/yt/utilities/tests/test_interpolators.py
@@ -0,0 +1,27 @@
+from yt.testing import *
+import yt.utilities.linear_interpolators as lin
+
+def setup():
+    pass
+
+def test_linear_interpolator_1d():
+    random_data = np.random.random(64)
+    fv = {'x': np.mgrid[0.0:1.0:64j]}
+    ufi = lin.UnilinearFieldInterpolator(random_data, (0.0, 1.0), "x", True)
+    assert_array_equal(ufi(fv), random_data)
+
+def test_linear_interpolator_2d():
+    random_data = np.random.random((64, 64))
+    fv = dict((ax, v) for ax, v in zip("xyz",
+               np.mgrid[0.0:1.0:64j, 0.0:1.0:64j]))
+    bfi = lin.BilinearFieldInterpolator(random_data,
+            (0.0, 1.0, 0.0, 1.0), "xy", True)
+    assert_array_equal(bfi(fv), random_data)
+
+def test_linear_interpolator_3d():
+    random_data = np.random.random((64, 64, 64))
+    fv = dict((ax, v) for ax, v in zip("xyz",
+               np.mgrid[0.0:1.0:64j, 0.0:1.0:64j, 0.0:1.0:64j]))
+    tfi = lin.TrilinearFieldInterpolator(random_data,
+            (0.0, 1.0, 0.0, 1.0, 0.0, 1.0), "xyz", True)
+    assert_array_equal(tfi(fv), random_data)
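
Outside the tests, the interpolators are plain table-lookup objects; a
hedged 1D sketch (the table values are illustrative):

    import numpy as np
    import yt.utilities.linear_interpolators as lin

    table = np.random.random(64)  # values tabulated on [0, 1]
    ufi = lin.UnilinearFieldInterpolator(table, (0.0, 1.0), "x", True)
    queries = {"x": np.array([0.1, 0.5, 0.9])}
    print ufi(queries)            # linearly interpolated values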


diff -r 79743892acbc5d49bbc5067db8077e349a745e1f -r 5e0edcf60e2b0d1f073478f0d5ec89405310ee8f yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -872,11 +872,15 @@
         return names
 
     def _send_zmq(self):
-        from IPython.zmq.pylab.backend_inline import \
-                    send_figure
+        try:
+            # pre-IPython v0.14        
+            from IPython.zmq.pylab.backend_inline import send_figure as display
+        except ImportError:
+            # IPython v0.14+ 
+            from IPython.core.display import display
         for k, v in sorted(self.plots.iteritems()):
             canvas = FigureCanvasAgg(v.figure)
-            send_figure(v.figure)
+            display(v.figure)
 
     def show(self):
         r"""This will send any existing plots to the IPython notebook.


diff -r 79743892acbc5d49bbc5067db8077e349a745e1f -r 5e0edcf60e2b0d1f073478f0d5ec89405310ee8f yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -195,7 +195,7 @@
         if not iterable(width):
             width = (width, width, width) # left/right, top/bottom, front/back 
         self.orienter = Orientation(normal_vector, north_vector=north_vector, steady_north=steady_north)
-        self.rotation_vector = self.orienter.north_vector
+        self.rotation_vector = self.orienter.unit_vectors[1]
         self._setup_box_properties(width, center, self.orienter.unit_vectors)
         if fields is None: fields = ["Density"]
         self.fields = fields
@@ -282,7 +282,7 @@
         if center is not None:
             self.center = center
         if north_vector is None:
-            north_vector = self.orienter.north_vector
+            north_vector = self.orienter.unit_vectors[1]
         if normal_vector is None:
             normal_vector = self.orienter.normal_vector
         self.orienter.switch_orientation(normal_vector = normal_vector,
@@ -587,7 +587,7 @@
         """
         rot_vector = self.orienter.normal_vector
         R = get_rotation_matrix(theta, rot_vector)
-        north_vector = self.orienter.north_vector
+        north_vector = self.orienter.unit_vectors[1]
         self.switch_view(north_vector=np.dot(R, north_vector))
 
     def rotation(self, theta, n_steps, rot_vector=None, clip_ratio = None):
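
The switch from orienter.north_vector to orienter.unit_vectors[1] matters
because Orientation can leave north_vector as None (see the comment added
to orientation.py above); the rotation itself is just a matrix
application, sketched here with stand-in vectors:

    import numpy as np
    from yt.utilities.math_utils import get_rotation_matrix

    north = np.array([0.0, 0.0, 1.0])   # stands in for unit_vectors[1]
    normal = np.array([1.0, 0.0, 0.0])  # rotation axis, as in roll()
    R = get_rotation_matrix(np.pi / 6.0, normal)
    new_north = np.dot(R, north)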



https://bitbucket.org/yt_analysis/yt-3.0/changeset/4cb47cdf4520/
changeset:   4cb47cdf4520
branch:      yt
user:        ngoldbaum
date:        2012-09-27 21:03:32
summary:     Some bugfixes for OffAxisProjectionPlot
affected #:  2 files

diff -r 5e0edcf60e2b0d1f073478f0d5ec89405310ee8f -r 4cb47cdf45206350ccf754d76d8a78e9b01eaeaf yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -1,3 +1,4 @@
+
 """
 A plotting mechanism based on the idea of a "window" into the data.
 
@@ -184,15 +185,14 @@
         else:
             raise RuntimeError('center keyword \"%s\" not recognized'%center)
 
-    # Transforming to the cutting plane coordinate system
-    center = np.array(center)
-    center = (center - pf.domain_left_edge)/pf.domain_width - 0.5
-    (normal,perp1,perp2) = ortho_find(normal)
-    mat = np.transpose(np.column_stack((perp1,perp2,normal)))
-    center = np.dot(mat,center)
-    width = width
+    if width.shape == (2,):
+        # Transforming to the cutting plane coordinate system
+        center = np.array(center)
+        center = (center - pf.domain_left_edge)/pf.domain_width - 0.5
+        (normal,perp1,perp2) = ortho_find(normal)
+        mat = np.transpose(np.column_stack((perp1,perp2,normal)))
+        center = np.dot(mat,center)
     
-    if width.shape == (2,):
         bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2]
     else:
         bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2, -width[2]/2, width[2]/2]
@@ -270,12 +270,13 @@
                                                      bounds, self.buff_size, 
                                                      self.antialias, 
                                                      periodic=self._periodic)
-        elif 'Projection' in class_name or 'Slice' in class_name:
+        elif 'AMRQuadTreeProj' in class_name or 'Slice' in class_name:
             self._frb = FixedResolutionBuffer(self.data_source, 
                                               bounds, self.buff_size, 
                                               self.antialias, 
                                               periodic=self._periodic)
         else:
+            pdb.set_trace()
             raise RuntimeError("Failed to repixelize.")
         if old_fields is None:
             self._frb._get_data_source_fields()
@@ -367,7 +368,7 @@
             mylog.info("ylim = %f %f" %self.ylim)
         else:
             self.xlim = bounds[0:2]
-            self.ylim = bounds[2:]
+            self.ylim = bounds[2:4]
             
     @invalidate_data
     def set_width(self, width, unit = '1'):
@@ -1121,8 +1122,9 @@
             image = off_axis_projection(ds.pf, ds.center, ds.normal_vector,
                                         ds.width, ds.resolution, item,
                                         weight=ds.weight_field, volume=ds.volume,
-                                        no_ghost=ds.no_ghost, interpolated=ds.interpolated)
-            self.internal_dict[item] = image
+                                        no_ghost=ds.no_ghost, interpolated=ds.interpolated,
+                                        north_vector=ds.north_vector)
+            self.internal_dict[item] = image.T
         return image
     
     def _get_data_source_fields(self):


diff -r 5e0edcf60e2b0d1f073478f0d5ec89405310ee8f -r 4cb47cdf45206350ccf754d76d8a78e9b01eaeaf yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -1666,8 +1666,8 @@
 data_object_registry["projection_camera"] = ProjectionCamera
 
 def off_axis_projection(pf, center, normal_vector, width, resolution,
-                        field, weight = None, 
-                        volume = None, no_ghost = False, interpolated = False):
+                        field, weight=None, volume=None, no_ghost=False, 
+                        interpolated=False, north_vector=None):
     r"""Project through a parameter file, off-axis, and return the image plane.
 
     This function will accept the necessary items to integrate through a volume
@@ -1707,10 +1707,14 @@
         accuracy/smoothness in resulting image.  The effects are
         less notable when the transfer function is smooth and
         broad. Default: True
-    interpolated : optional, default False
+    interpolated : bool, optional
         If True, the data is first interpolated to vertex-centered data, 
         then tri-linearly interpolated along the ray. Not suggested for 
         quantitative studies.
+    north_vector : array-like, optional
+        A vector defining the 'up' direction in the plot.  This
+        option sets the orientation of the slicing plane.  If not
+        set, an arbitrary grid-aligned north-vector is chosen.
 
     Returns
     -------
@@ -1726,8 +1730,9 @@
 
     """
     projcam = ProjectionCamera(center, normal_vector, width, resolution,
-            field, weight=weight, pf=pf, volume=volume,
-            no_ghost=no_ghost, interpolated=interpolated)
+                               field, weight=weight, pf=pf, volume=volume,
+                               no_ghost=no_ghost, interpolated=interpolated,
+                               north_vector=north_vector)
     image = projcam.snapshot()
     if weight is not None:
         pf.field_info.pop("temp_weightfield")
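
A hedged sketch of the extended call (the dataset path, width, and
resolution are placeholders):

    from yt.mods import load
    from yt.visualization.volume_rendering.api import off_axis_projection

    pf = load("my_dataset")  # placeholder
    center = (pf.domain_left_edge + pf.domain_right_edge) / 2.0
    image = off_axis_projection(pf, center, [1.0, 1.0, 0.0], 1.0, 512,
                                "Density", north_vector=[0.0, 0.0, 1.0])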



https://bitbucket.org/yt_analysis/yt-3.0/changeset/f77fd14bdaa7/
changeset:   f77fd14bdaa7
branch:      yt
user:        jzuhone
date:        2012-10-05 00:47:02
summary:     Merging
affected #:  1 file

diff -r 4cb47cdf45206350ccf754d76d8a78e9b01eaeaf -r f77fd14bdaa7190ce4b3677ced28eeb0749d668a yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -753,8 +753,8 @@
                 labels = [r'$\rm{'+axis_labels[axis_index][i]+
                         axes_unit_label + r'}$' for i in (0,1)]
             else:
-                labels = [r'$\rm{Image\/x'+axes_unit_label+'}$',
-                          r'$\rm{Image\/y'+axes_unit_label+'}$']
+                labels = [r'$\rm{x'+axes_unit_label+'}$',
+                          r'$\rm{y'+axes_unit_label+'}$']
 
             self.plots[f].axes.set_xlabel(labels[0])
             self.plots[f].axes.set_ylabel(labels[1])



https://bitbucket.org/yt_analysis/yt-3.0/changeset/d5228350c515/
changeset:   d5228350c515
branch:      yt
user:        jzuhone
date:        2012-10-06 20:54:46
summary:     Support for 1D and 2D Athena datasets.
affected #:  1 file

diff -r f77fd14bdaa7190ce4b3677ced28eeb0749d668a -r d5228350c51505e38a21defe051d28ca8a6e66d0 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -326,7 +326,12 @@
         refine_by = None
         if refine_by is None: refine_by = 2
         self.refine_by = refine_by
-        self.dimensionality = 3
+        dimensionality = 3
+        if grid['dimensions'][2] == 1 :
+            dimensionality = 2
+        if grid['dimensions'][1] == 1 :
+            dimensionality = 1
+        self.dimensionality = dimensionality
         self.current_time = grid["time"]
         self.unique_identifier = self._handle.__hash__()
         self.cosmological_simulation = False
@@ -334,7 +339,9 @@
         self.field_ordering = 'fortran'
         self.boundary_conditions = [1]*6
 
-        self.nvtk = int(np.product(self.domain_dimensions/(grid['dimensions']-1)))
+        ND = self.dimensionality
+        print ND
+        self.nvtk = int(np.product(self.domain_dimensions[:ND]/(grid['dimensions'][:ND]-1)))
 
         self.current_redshift = self.omega_lambda = self.omega_matter = \
             self.hubble_constant = self.cosmological_simulation = 0.0
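
The dimensionality inference just strips trailing unit axes, and only the
leading ND axes then enter the vtk-file count; standalone, with
illustrative numbers:

    import numpy as np
    dims = np.array([129, 65, 1])   # one 2D Athena grid, illustratively
    dimensionality = 3
    if dims[2] == 1: dimensionality = 2
    if dims[1] == 1: dimensionality = 1
    domain = np.array([256, 128, 1])
    nvtk = int(np.product(domain[:dimensionality] /
                          (dims[:dimensionality] - 1)))  # -> 2 * 2 = 4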



https://bitbucket.org/yt_analysis/yt-3.0/changeset/ae18bc654a02/
changeset:   ae18bc654a02
branch:      yt
user:        jzuhone
date:        2012-10-06 21:39:59
summary:     Merging
affected #:  13 files

diff -r d5228350c51505e38a21defe051d28ca8a6e66d0 -r ae18bc654a0282f8a9bf1c550d8d8091d0774222 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -400,7 +400,7 @@
 
 echo '2c1933ab31246b4f4eba049d3288156e0a72f1730604e3ed7357849967cdd329e4647cf236c9442ecfb06d0aff03e6fc892a7ba2a5c1cf5c011b7ab9c619acec  Cython-0.16.tar.gz' > Cython-0.16.tar.gz.sha512
 echo '44eea803870a66ff0bab08d13a8b3388b5578ebc1c807d1d9dca0a93e6371e91b15d02917a00b3b20dc67abb5a21dabaf9b6e9257a561f85eeff2147ac73b478  PyX-0.11.1.tar.gz' > PyX-0.11.1.tar.gz.sha512
-echo '1a754d560bfa433f0960ab3b5a62edb5f291be98ec48cf4e5941fa5b84139e200b87a52efbbd6fa4a76d6feeff12439eed3e7a84db4421940d1bbb576f7a684e  Python-2.7.2.tgz' > Python-2.7.2.tgz.sha512
+echo 'b981f8464575bb24c297631c87a3b9172312804a0fc14ce1fa7cb41ce2b0d2fd383cd1c816d6e10c36467d18bf9492d6faf557c81c04ff3b22debfa93f30ad0b  Python-2.7.3.tgz' > Python-2.7.3.tgz.sha512
 echo 'c017d3d59dd324ac91af0edc178c76b60a5f90fbb775cf843e39062f95bd846238f2c53705f8890ed3f34bc0e6e75671a73d13875eb0287d6201cb45f0a2d338  bzip2-1.0.5.tar.gz' > bzip2-1.0.5.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
 echo 'b519218f93946400326e9b656669269ecb3e5232b944e18fbc3eadc4fe2b56244d68aae56d6f69042b4c87c58c881ee2aaa279561ea0f0f48d5842155f4de9de  freetype-2.4.4.tar.gz' > freetype-2.4.4.tar.gz.sha512
@@ -429,7 +429,7 @@
 [ $INST_0MQ -eq 1 ] && get_ytproject zeromq-2.2.0.tar.gz
 [ $INST_0MQ -eq 1 ] && get_ytproject pyzmq-2.1.11.tar.gz
 [ $INST_0MQ -eq 1 ] && get_ytproject tornado-2.2.tar.gz
-get_ytproject Python-2.7.2.tgz
+get_ytproject Python-2.7.3.tgz
 get_ytproject numpy-1.6.1.tar.gz
 get_ytproject matplotlib-1.1.0.tar.gz
 get_ytproject mercurial-2.2.2.tar.gz
@@ -554,11 +554,11 @@
     fi
 fi
 
-if [ ! -e Python-2.7.2/done ]
+if [ ! -e Python-2.7.3/done ]
 then
     echo "Installing Python.  This may take a while, but don't worry.  YT loves you."
-    [ ! -e Python-2.7.2 ] && tar xfz Python-2.7.2.tgz
-    cd Python-2.7.2
+    [ ! -e Python-2.7.3 ] && tar xfz Python-2.7.3.tgz
+    cd Python-2.7.3
     ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
 
     ( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit


diff -r d5228350c51505e38a21defe051d28ca8a6e66d0 -r ae18bc654a0282f8a9bf1c550d8d8091d0774222 yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -65,6 +65,9 @@
     quantity_info, \
     add_quantity
 
+from image_array import \
+    ImageArray
+
 from field_info_container import \
     FieldInfoContainer, \
     FieldInfo, \


diff -r d5228350c51505e38a21defe051d28ca8a6e66d0 -r ae18bc654a0282f8a9bf1c550d8d8091d0774222 yt/data_objects/image_array.py
--- /dev/null
+++ b/yt/data_objects/image_array.py
@@ -0,0 +1,271 @@
+"""
+ImageArray Class
+
+Authors: Samuel Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
+
+Homepage: http://yt-project.org/
+License:
+    Copyright (C) 2012 Samuel Skillman.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+  """
+
+import numpy as np
+import h5py as h5
+from yt.visualization.image_writer import write_bitmap, write_image
+
+class ImageArray(np.ndarray):
+    r"""A custom Numpy ndarray used for images.
+
+    This differs from ndarray in that you can optionally specify an
+    info dictionary which is used later in saving, and can be accessed with
+    ImageArray.info.
+
+    Parameters
+    ----------
+    input_array: array_like
+        A numpy ndarray, or list. 
+
+    Other Parameters
+    ----------------
+    info: dictionary
+        Contains information to be stored with image.
+
+    Returns
+    -------
+    obj: ImageArray object 
+
+    See Also
+    --------
+    numpy.ndarray : Inherits
+
+    Examples
+    --------
+    These are written in doctest format and illustrate how to use the
+    class.
+
+    >>> im = np.zeros([64,128,3])
+    >>> for i in xrange(im.shape[0]):
+    >>>     for k in xrange(im.shape[2]):
+    >>>         im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
+
+    >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
+    >>>     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
+    >>>     'width':0.245, 'units':'cm', 'type':'rendering'}
+
+    >>> im_arr = ImageArray(im, info=myinfo)
+    >>> im_arr.save('test_ImageArray')
+
+    Numpy ndarray documentation appended:
+
+    """
+    def __new__(cls, input_array, info=None):
+        # Input array is an already formed ndarray instance
+        # We first cast to be our class type
+        obj = np.asarray(input_array).view(cls)
+        # add the new attribute to the created instance
+        if info is None:
+            info = {}
+        obj.info = info
+        # Finally, we must return the newly created object:
+        return obj
+
+    def __array_finalize__(self, obj):
+        # see InfoArray.__array_finalize__ for comments
+        if obj is None: return
+        self.info = getattr(obj, 'info', None)
+
+    def write_hdf5(self, filename):
+        r"""Writes ImageArray to hdf5 file.
+
+        Parameters
+        ----------
+        filename: string
+            The filename to write to; it is used as given, with no
+            extension appended.
+       
+        Examples
+        -------- 
+        >>> im = np.zeros([64,128,3])
+        >>> for i in xrange(im.shape[0]):
+        >>>     for k in xrange(im.shape[2]):
+        >>>         im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
+
+        >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
+        >>>     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
+        >>>     'width':0.245, 'units':'cm', 'type':'rendering'}
+
+        >>> im_arr = ImageArray(im, info=myinfo)
+        >>> im_arr.write_hdf5('test_ImageArray.h5')
+
+        """
+        array_name = self.info.get("name","image")
+
+        f = h5.File(filename)
+        if array_name in f.keys():
+            del f[array_name]
+        d = f.create_dataset(array_name, data=self)
+        for k, v in self.info.iteritems():
+            d.attrs.create(k, v)
+        f.close()
+
+    def write_png(self, filename, clip_ratio=None):
+        r"""Writes ImageArray to png file.
+
+        Parameters
+        ----------
+        filename: string
+            The filename to write to; '.png' is appended if not already
+            present.
+       
+        Examples
+        --------
+        
+        >>> im = np.zeros([64,128,3])
+        >>> for i in xrange(im.shape[0]):
+        >>>     for k in xrange(im.shape[2]):
+        >>>         im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
+
+        >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
+        >>>     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
+        >>>     'width':0.245, 'units':'cm', 'type':'rendering'}
+
+        >>> im_arr = ImageArray(im, info=myinfo)
+        >>> im_arr.write_png('test_ImageArray.png')
+
+        """
+        if filename[-4:] != '.png': 
+            filename += '.png'
+
+        if clip_ratio is not None:
+            return write_bitmap(self.swapaxes(0, 1), filename,
+                                clip_ratio * self.std())
+        else:
+            return write_bitmap(self.swapaxes(0, 1), filename)
+
+    def write_image(self, filename, color_bounds=None, channel=None,  cmap_name="algae", func=lambda x: x):
+        r"""Writes a single channel of the ImageArray to a png file.
+
+        Parameters
+        ----------
+        filename: string
+            The filename to write to; '.png' is appended if not already
+            present.
+       
+        Other Parameters
+        ----------------
+        channel: int
+            Which channel to write out as an image. Defaults to 0
+        cmap_name: string
+            Name of the colormap to be used.
+        color_bounds : tuple of floats, optional
+            The min and max to scale between.  Outlying values will be clipped.
+        cmap_name : string, optional
+            An acceptable colormap.  See either yt.visualization.color_maps or
+            http://www.scipy.org/Cookbook/Matplotlib/Show_colormaps .
+        func : function, optional
+            A function to transform the buffer before applying a colormap. 
+
+        Returns
+        -------
+        scaled_image : uint8 image that has been saved
+        
+        Examples
+        --------
+        
+        >>> im = np.zeros([64,128])
+        >>> for i in xrange(im.shape[0]):
+        >>>     im[i,:] = np.linspace(0.,0.3*i, im.shape[1])
+
+        >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
+        >>>     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
+        >>>     'width':0.245, 'units':'cm', 'type':'rendering'}
+
+        >>> im_arr = ImageArray(im, info=myinfo)
+        >>> im_arr.write_image('test_ImageArray.png')
+
+        """
+        if filename[-4:] != '.png': 
+            filename += '.png'
+
+        if channel is None:
+            return write_image(self.swapaxes(0,1), filename, 
+                               color_bounds=color_bounds, cmap_name=cmap_name, 
+                               func=func)
+        else:
+            return write_image(self.swapaxes(0,1)[:,:,channel], filename, 
+                               color_bounds=color_bounds, cmap_name=cmap_name, 
+                               func=func)
+
+    def save(self, filename, png=True, hdf5=True):
+        """
+        Saves ImageArray. 
+
+        Arguments:
+          filename: string
+            This should not contain the extension type (.png, .h5, ...)
+
+        Optional Arguments:
+          png: boolean, default True
+            Save to a png
+
+          hdf5: boolean, default True
+            Save to hdf5 file, including info dictionary as attributes.
+
+        """
+        if png:
+            if len(self.shape) > 2:
+                self.write_png("%s.png" % filename)
+            else:
+                self.write_image("%s.png" % filename)
+        if hdf5:
+            self.write_hdf5("%s.h5" % filename)
+
+    __doc__ += np.ndarray.__doc__
+
+if __name__ == "__main__":
+    im = np.zeros([64,128,3])
+    for i in xrange(im.shape[0]):
+        for k in xrange(im.shape[2]):
+            im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
+
+    myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
+        'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
+        'width':0.245, 'units':'cm', 'type':'rendering'}
+
+    im_arr = ImageArray(im, info=myinfo)
+    im_arr.save('test_3d_ImageArray')
+
+    im = np.zeros([64,128])
+    for i in xrange(im.shape[0]):
+        im[i,:] = np.linspace(0.,0.3*i, im.shape[1])
+
+    myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
+        'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
+        'width':0.245, 'units':'cm', 'type':'rendering'}
+
+    im_arr = ImageArray(im, info=myinfo)
+    im_arr.save('test_2d_ImageArray')
+


diff -r d5228350c51505e38a21defe051d28ca8a6e66d0 -r ae18bc654a0282f8a9bf1c550d8d8091d0774222 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -938,6 +938,25 @@
           validators=[ValidateParameter("cp_%s_vec" % ax)
                       for ax in 'xyz'], units=r"\rm{km}/\rm{s}")
 
+def _CuttingPlaneBx(field, data):
+    x_vec, y_vec, z_vec = [data.get_field_parameter("cp_%s_vec" % (ax))
+                           for ax in 'xyz']
+    b_vec = np.array([data["B%s" % ax] for ax in 'xyz'])
+    return np.dot(x_vec, b_vec)
+add_field("CuttingPlaneBx", 
+          function=_CuttingPlaneBx,
+          validators=[ValidateParameter("cp_%s_vec" % ax)
+                      for ax in 'xyz'], units=r"\rm{Gauss}")
+def _CuttingPlaneBy(field, data):
+    x_vec, y_vec, z_vec = [data.get_field_parameter("cp_%s_vec" % (ax))
+                           for ax in 'xyz']
+    b_vec = np.array([data["B%s" % ax] for ax in 'xyz'])
+    return np.dot(y_vec, b_vec)
+add_field("CuttingPlaneBy", 
+          function=_CuttingPlaneBy,
+          validators=[ValidateParameter("cp_%s_vec" % ax)
+                      for ax in 'xyz'], units=r"\rm{Gauss}")
+
 def _MeanMolecularWeight(field,data):
     return (data["Density"] / (mh *data["NumberDensity"]))
 add_field("MeanMolecularWeight",function=_MeanMolecularWeight,units=r"")
@@ -975,12 +994,37 @@
     units of Gauss. If you use MKS, make sure to write your own
     MagneticEnergy field to deal with non-unitary \mu_0.
     """
-    return (data["Bx"]**2 + data["By"]**2 + data["Bz"]**2)/2.
+    return (data["Bx"]**2 + data["By"]**2 + data["Bz"]**2)/(8*np.pi)
 add_field("MagneticEnergy",function=_MagneticEnergy,
-          units=r"",
-          validators = [ValidateDataField("Bx"),
-                        ValidateDataField("By"),
-                        ValidateDataField("Bz")])
+          units=r"\rm{ergs}\/\rm{cm}^{-3}",
+          display_name=r"\rm{Magnetic}\/\rm{Energy}")
+
+def _BMagnitude(field,data):
+    """This assumes that your front end has provided Bx, By, Bz in
+    units of Gauss. If you use MKS, make sure to write your own
+    BMagnitude field to deal with non-unitary \mu_0.
+    """
+    return np.sqrt((data["Bx"]**2 + data["By"]**2 + data["Bz"]**2))
+add_field("BMagnitude",
+          function=_BMagnitude,
+          display_name=r"|B|", units=r"\rm{Gauss}")
+
+def _PlasmaBeta(field,data):
+    """This assumes that your front end has provided Bx, By, Bz in
+    units of Gauss. If you use MKS, make sure to write your own
+    PlasmaBeta field to deal with non-unitary \mu_0.
+    """
+    return data['Pressure']/data['MagneticEnergy']
+add_field("PlasmaBeta",
+          function=_PlasmaBeta,
+          display_name=r"\rm{Plasma}\/\beta", units="")
+
+def _MagneticPressure(field,data):
+    return data['MagneticEnergy']
+add_field("MagneticPressure",
+          function=_MagneticPressure,
+          display_name=r"\rm{Magnetic}\/\rm{Pressure}",
+          units=r"\rm{ergs}\/\rm{cm}^{-3}")
 
 def _VorticitySquared(field, data):
     mylog.debug("Generating vorticity on %s", data)
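
The two new cutting-plane fields are plain dot products of B with the
in-plane unit vectors of the cutting plane; standalone, with invented
vectors:

    import numpy as np
    x_vec = np.array([1.0, 0.0, 0.0])    # "cp_x_vec" field parameter
    y_vec = np.array([0.0, 1.0, 0.0])    # "cp_y_vec" field parameter
    b_vec = np.array([[1.0e-6, 2.0e-6],  # Bx per cell
                      [0.0,    1.0e-6],  # By per cell
                      [3.0e-6, 0.0]])    # Bz per cell
    cutting_bx = np.dot(x_vec, b_vec)    # in-plane x component, Gauss
    cutting_by = np.dot(y_vec, b_vec)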


diff -r d5228350c51505e38a21defe051d28ca8a6e66d0 -r ae18bc654a0282f8a9bf1c550d8d8091d0774222 yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -98,6 +98,21 @@
 add_field("Density",function=_Density, take_log=True,
           units=r'\rm{g}/\rm{cm^3}')
 
+def _Bx(field,data):
+    return data["X-magnfield"]
+add_field("Bx", function=_Bx, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_x")
+
+def _By(field,data):
+    return data["Y-magnfield"]
+add_field("By", function=_By, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_y")
+
+def _Bz(field,data):
+    return data["Z-magnfield"]
+add_field("Bz", function=_Bz, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_z")
+
 def _MagneticEnergy(field,data):
     return (data["X-magnfield"]**2 +
             data["Y-magnfield"]**2 +


diff -r d5228350c51505e38a21defe051d28ca8a6e66d0 -r ae18bc654a0282f8a9bf1c550d8d8091d0774222 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -23,6 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+import numpy as np
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, \
     NullFunc, \
@@ -97,7 +98,10 @@
     if fn1.endswith("_Fraction"):
         add_field(fn1.split("_")[0] + "_Density",
                   function=_get_density(fn1), take_log=True,
-                  display_name="%s\/Density" % fn1.split("_")[0])
+                  display_name="%s\/Density" % fn1.split("_")[0],
+                  units = r"\rm{g}/\rm{cm}^3",
+                  projected_units = r"\rm{g}/\rm{cm}^2",
+                  )
 
 def _get_convert(fname):
     def _conv(data):
@@ -106,7 +110,8 @@
 
 add_flash_field("dens", function=NullFunc, take_log=True,
                 convert_function=_get_convert("dens"),
-                units=r"\rm{g}/\rm{cm}^3")
+                units=r"\rm{g}/\rm{cm}^3",
+                projected_units = r"\rm{g}/\rm{cm}^2")
 add_flash_field("velx", function=NullFunc, take_log=False,
                 convert_function=_get_convert("velx"),
                 units=r"\rm{cm}/\rm{s}")
@@ -203,6 +208,7 @@
     add_field(f, TranslationFunc(v),
               take_log=KnownFLASHFields[v].take_log,
               units = ff._units, display_name=dname,
+              projected_units = ff._projected_units,
               particle_type = pfield)
 
 def _convertParticleMassMsun(data):
@@ -254,3 +260,43 @@
 
 add_field("GasEnergy", function=_GasEnergy, 
           units=r"\rm{ergs}/\rm{g}")
+
+# See http://flash.uchicago.edu/pipermail/flash-users/2012-October/001180.html
+# along with the attachment to that e-mail for details
+def GetMagRescalingFactor(pf):
+    if pf['unitsystem'].lower() == "cgs":
+        factor = 1
+    elif pf['unitsystem'].lower() == "si":
+        factor = np.sqrt(4*np.pi/1e7)
+    elif pf['unitsystem'].lower() == "none":
+        factor = np.sqrt(4*np.pi)
+    else:
+        raise RuntimeError("Runtime parameter unitsystem with "
+                           "value %s is unrecognized" % pf['unitsystem'])
+    return factor
+
+def _Bx(fields, data):
+    factor = GetMagRescalingFactor(data.pf)
+    return data['magx']*factor
+add_field("Bx", function=_Bx, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_x")
+
+def _By(fields, data):
+    factor = GetMagRescalingFactor(data.pf)
+    return data['magy']*factor
+add_field("By", function=_By, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_y")
+
+def _Bz(fields, data):
+    factor = GetMagRescalingFactor(data.pf)
+    return data['magz']*factor
+add_field("Bz", function=_Bz, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_z")
+
+def _DivB(fields, data):
+    factor = GetMagRescalingFactor(data.pf)
+    return data['divb']*factor
+add_field("DivB", function=_DivB, take_log=False,
+          units=r"\rm{Gauss}\/\rm{cm}^{-1}")
+
+
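For reference, the three rescaling factors in isolation (factor values
copied from the code above; the raw 'magx' value is illustrative):

    import numpy as np
    factors = {"cgs": 1.0,
               "si": np.sqrt(4 * np.pi / 1e7),
               "none": np.sqrt(4 * np.pi)}
    bx_gauss = 0.01 * factors["none"]  # raw magx from a unitsystem=none run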


diff -r d5228350c51505e38a21defe051d28ca8a6e66d0 -r ae18bc654a0282f8a9bf1c550d8d8091d0774222 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -62,7 +62,7 @@
     ValidateParameter, ValidateDataField, ValidateProperty, \
     ValidateSpatial, ValidateGridType, \
     TimeSeriesData, AnalysisTask, analysis_task, \
-    ParticleTrajectoryCollection
+    ParticleTrajectoryCollection, ImageArray
 
 from yt.data_objects.derived_quantities import \
     add_quantity, quantity_info


diff -r d5228350c51505e38a21defe051d28ca8a6e66d0 -r ae18bc654a0282f8a9bf1c550d8d8091d0774222 yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -590,7 +590,7 @@
         cdef np.float64_t *pointer = <np.float64_t *> star_colors.data
         for i in range(pos_x.shape[0]):
             kdtree_utils.kd_insert3(self.tree,
-                pos_x[i], pos_y[i], pos_z[i], pointer + i*3)
+                pos_x[i], pos_y[i], pos_z[i], <void *> (pointer + i*3))
 
     def __dealloc__(self):
         kdtree_utils.kd_free(self.tree)
@@ -616,7 +616,7 @@
     cdef np.float64_t slopes[6], dp[3], ds[3]
     cdef np.float64_t dt = (exit_t - enter_t) / vri.n_samples
     cdef np.float64_t dvs[6], cell_left[3], local_dds[3], pos[3]
-    cdef int nstars
+    cdef int nstars, dti, i, j
     cdef np.float64_t *colors = NULL, gexp, gaussian, px, py, pz
     for i in range(3):
         dp[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
@@ -648,6 +648,7 @@
         dvs[i] = temp
     for dti in range(vri.n_samples): 
         # Now we add the contribution from stars
+        kdtree_utils.kd_res_rewind(ballq)
         for i in range(nstars):
             kdtree_utils.kd_res_item3(ballq, &px, &py, &pz)
             colors = <np.float64_t *> kdtree_utils.kd_res_item_data(ballq)
@@ -655,20 +656,22 @@
             gexp = (px - pos[0])*(px - pos[0]) \
                  + (py - pos[1])*(py - pos[1]) \
                  + (pz - pos[2])*(pz - pos[2])
-            gaussian = vri.star_coeff * expl(-gexp/vri.star_sigma_num)
-            for i in range(3): im.rgba[i] += gaussian*dt*colors[i]
+            gaussian = vri.star_coeff * exp(-gexp/vri.star_sigma_num)
+            for j in range(3): im.rgba[j] += gaussian*dt*colors[j]
         for i in range(3):
             pos[i] += local_dds[i]
         FIT_eval_transfer(dt, dvs, im.rgba, vri.n_fits, vri.fits,
                           vri.field_table_ids, vri.grey_opacity)
         for i in range(vc.n_fields):
             dvs[i] += slopes[i]
+    kdtree_utils.kd_res_free(ballq)
 
 cdef class VolumeRenderSampler(ImageSampler):
     cdef VolumeRenderAccumulator *vra
     cdef public object tf_obj
     cdef public object my_field_tables
     cdef kdtree_utils.kdtree **trees
+    cdef object tree_containers
     def __cinit__(self, 
                   np.ndarray vp_pos,
                   np.ndarray vp_dir,
@@ -709,6 +712,7 @@
             self.vra.field_table_ids[i] = tf_obj.field_table_ids[i]
         self.supp_data = <void *> self.vra
         cdef star_kdtree_container skdc
+        self.tree_containers = star_list
         if star_list is None:
             self.trees = NULL
         else:
@@ -719,10 +723,15 @@
                 self.trees[i] = skdc.tree
 
     cdef void setup(self, PartitionedGrid pg):
+        cdef star_kdtree_container star_tree
         if self.trees == NULL:
             self.sampler = volume_render_sampler
         else:
+            star_tree = self.tree_containers[pg.parent_grid_id]
             self.vra.star_list = self.trees[pg.parent_grid_id]
+            self.vra.star_sigma_num = 2.0*star_tree.sigma**2.0
+            self.vra.star_er = 2.326 * star_tree.sigma
+            self.vra.star_coeff = star_tree.coeff
             self.sampler = volume_render_stars_sampler
 
     def __dealloc__(self):


diff -r d5228350c51505e38a21defe051d28ca8a6e66d0 -r ae18bc654a0282f8a9bf1c550d8d8091d0774222 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -28,6 +28,7 @@
     x_dict, \
     y_dict, \
     axis_names
+from .volume_rendering.api import off_axis_projection
 import _MPL
 import numpy as np
 import weakref
@@ -384,3 +385,28 @@
                                self.bounds).transpose()
         self[item] = buff
         return buff
+
+
+class OffAxisProjectionFixedResolutionBuffer(FixedResolutionBuffer):
+    def __init__(self, data_source, bounds, buff_size, antialias = True,                                                         
+                 periodic = False):
+        self.data = {}
+        FixedResolutionBuffer.__init__(self, data_source, bounds, buff_size, antialias, periodic)
+
+    def __getitem__(self, item):
+        if item in self.data: return self.data[item]
+        mylog.info("Making a fixed resolutuion buffer of (%s) %d by %d" % \
+            (item, self.buff_size[0], self.buff_size[1]))
+        ds = self.data_source
+        width = (self.bounds[1] - self.bounds[0],
+                 self.bounds[3] - self.bounds[2],
+                 self.bounds[5] - self.bounds[4])
+        buff = off_axis_projection(ds.pf, ds.center, ds.normal_vector,
+                                   width, ds.resolution, item,
+                                   weight=ds.weight_field, volume=ds.volume,
+                                   no_ghost=ds.no_ghost, interpolated=ds.interpolated,
+                                   north_vector=ds.north_vector)
+        self[item] = buff.swapaxes(0,1)
+        return buff
+
+


diff -r d5228350c51505e38a21defe051d28ca8a6e66d0 -r ae18bc654a0282f8a9bf1c550d8d8091d0774222 yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -116,7 +116,7 @@
     image = image.transpose().copy() # Have to make sure it's contiguous 
     au.write_png(image, fn)
 
-def write_bitmap(bitmap_array, filename, max_val = None, transpose=True):
+def write_bitmap(bitmap_array, filename, max_val = None, transpose=False):
     r"""Write out a bitmapped image directly to a PNG file.
 
     This accepts a three- or four-channel `bitmap_array`.  If the image is not
@@ -152,8 +152,7 @@
         alpha_channel = 255*np.ones((s1,s2,1), dtype='uint8')
         bitmap_array = np.concatenate([bitmap_array, alpha_channel], axis=-1)
     if transpose:
-        for channel in range(bitmap_array.shape[2]):
-            bitmap_array[:,:,channel] = bitmap_array[:,:,channel].T
+        bitmap_array = bitmap_array.swapaxes(0,1)
     if filename is not None:
         au.write_png(bitmap_array.copy(), filename)
     else:
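
Beyond being shorter, the swapaxes form also handles non-square images,
where the old per-channel in-place transpose could not (a (s1, s2) slice
cannot be assigned its (s2, s1) transpose):

    import numpy as np
    im = np.zeros((64, 128, 4), dtype="uint8")  # non-square RGBA image
    # im[:, :, 0] = im[:, :, 0].T  would raise a shape-mismatch error
    flipped = im.swapaxes(0, 1)                 # shape (128, 64, 4) view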


diff -r d5228350c51505e38a21defe051d28ca8a6e66d0 -r ae18bc654a0282f8a9bf1c550d8d8091d0774222 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -146,7 +146,9 @@
     def __call__(self, plot):
         # Instantiation of these is cheap
         if plot._type_name == "CuttingPlane":
-            print "WARNING: Magnetic field on Cutting Plane Not implemented."
+            qcb = CuttingQuiverCallback("CuttingPlaneBx",
+                                        "CuttingPlaneBy",
+                                        self.factor)
         else:
             xv = "B%s" % (x_names[plot.data.axis])
             yv = "B%s" % (y_names[plot.data.axis])
@@ -432,6 +434,9 @@
             iy = np.maximum(np.minimum((yt).astype('int'), ny-1), 0)
             lines[i,0,:,:] = xt + dt * pixX[ix,iy] * scale
             lines[i,1,:,:] = yt + dt * pixY[ix,iy] * scale
+        # scale into data units
+        lines[:,0,:,:] = lines[:,0,:,:] * (xx1 - xx0) / nx + xx0
+        lines[:,1,:,:] = lines[:,1,:,:] * (yy1 - yy0) / ny + yy0
         for i in range(self.data_size[0]):
             for j in range(self.data_size[1]):
                 plot._axes.plot(lines[:,0,i,j], lines[:,1,i,j],
@@ -650,8 +655,8 @@
                                plot.data[self.field_y],
                                int(nx), int(ny),
                                (x0, x1, y0, y1),).transpose()
-        X = np.mgrid[0:plot.image._A.shape[0]-1:nx*1j]# + 0.5*factor
-        Y = np.mgrid[0:plot.image._A.shape[1]-1:ny*1j]# + 0.5*factor
+        X,Y = np.meshgrid(np.linspace(xx0,xx1,nx,endpoint=True),
+                          np.linspace(yy0,yy1,ny,endpoint=True))
         plot._axes.quiver(X,Y, pixX, pixY)
         plot._axes.set_xlim(xx0,xx1)
         plot._axes.set_ylim(yy0,yy1)
@@ -723,9 +728,13 @@
         self.plot_args = plot_args
 
     def __call__(self, plot):
+        if len(self.pos) == 3:
+            pos = (self.pos[x_dict[plot.data.axis]],
+                   self.pos[y_dict[plot.data.axis]])
+        else: pos = self.pos
         from matplotlib.patches import Arrow
         # Now convert the pixels to code information
-        x, y = self.convert_to_plot(plot, self.pos)
+        x, y = self.convert_to_plot(plot, pos)
         dx, dy = self.convert_to_plot(plot, self.code_size, False)
         arrow = Arrow(x, y, dx, dy, **self.plot_args)
         plot._axes.add_patch(arrow)
@@ -745,12 +754,13 @@
         self.text_args = text_args
 
     def __call__(self, plot):
-
-
+        if len(self.pos) == 3:
+            pos = (self.pos[x_dict[plot.data.axis]],
+                   self.pos[y_dict[plot.data.axis]])
+        else: pos = self.pos
         width,height = plot.image._A.shape
-        x,y = self.convert_to_plot(plot, self.pos)
-        x,y = x/width,y/height
-
+        x,y = self.convert_to_plot(plot, pos)
+        
         plot._axes.text(x, y, self.text, **self.text_args)
 
 class MarkerAnnotateCallback(PlotCallback):


diff -r d5228350c51505e38a21defe051d28ca8a6e66d0 -r ae18bc654a0282f8a9bf1c550d8d8091d0774222 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -1,4 +1,3 @@
-
 """
 A plotting mechanism based on the idea of a "window" into the data.
 
@@ -44,11 +43,11 @@
     write_image, apply_colormap
 from .fixed_resolution import \
     FixedResolutionBuffer, \
-    ObliqueFixedResolutionBuffer
+    ObliqueFixedResolutionBuffer, \
+    OffAxisProjectionFixedResolutionBuffer
 from .plot_modifications import get_smallest_appropriate_unit, \
     callback_registry
 from .tick_locators import LogLocator, LinearLocator
-from .volume_rendering.api import off_axis_projection
 from yt.utilities.delaunay.triangulate import Triangulation as triang
 from yt.config import ytcfg
 
@@ -108,7 +107,10 @@
         self.pf = frb.pf
         self.xlim = viewer.xlim
         self.ylim = viewer.ylim
-        self._type_name = ''
+        if 'Cutting' in self.data.__class__.__name__:
+            self._type_name = "CuttingPlane"
+        else:
+            self._type_name = ''
 
 class FieldTransform(object):
     def __init__(self, name, func, locator):
@@ -192,6 +194,7 @@
         (normal,perp1,perp2) = ortho_find(normal)
         mat = np.transpose(np.column_stack((perp1,perp2,normal)))
         center = np.dot(mat,center)
+        width = width
     
         bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2]
     else:
@@ -258,26 +261,14 @@
         old_fields = None
         if self._frb is not None:
             old_fields = self._frb.keys()
-        bounds = self.xlim+self.ylim
-        class_name = self.data_source.__class__.__name__
-        if 'OffAxisProjection' in class_name:
-            self._frb = OffAxisProjectionDummyFRB(self.data_source,
-                                                  bounds, self.buff_size,
-                                                  self.antialias,
-                                                  periodic=self._periodic)
-        elif 'Cutting' in class_name:
-            self._frb = ObliqueFixedResolutionBuffer(self.data_source, 
-                                                     bounds, self.buff_size, 
-                                                     self.antialias, 
-                                                     periodic=self._periodic)
-        elif 'AMRQuadTreeProj' in class_name or 'Slice' in class_name:
-            self._frb = FixedResolutionBuffer(self.data_source, 
-                                              bounds, self.buff_size, 
-                                              self.antialias, 
-                                              periodic=self._periodic)
+        if hasattr(self,'zlim'):
+            bounds = self.xlim+self.ylim+self.zlim
         else:
-            pdb.set_trace()
-            raise RuntimeError("Failed to repixelize.")
+            bounds = self.xlim+self.ylim
+        self._frb = self._frb_generator(self.data_source,
+                                        bounds, self.buff_size,
+                                        self.antialias,
+                                        periodic=self._periodic)
         if old_fields is None:
             self._frb._get_data_source_fields()
         else:
@@ -318,6 +309,7 @@
         nWx, nWy = Wx/factor, Wy/factor
         self.xlim = (centerx - nWx*0.5, centerx + nWx*0.5)
         self.ylim = (centery - nWy*0.5, centery + nWy*0.5)
+                    
 
     @invalidate_data
     def pan(self, deltas):
@@ -364,12 +356,16 @@
             dy = bounds[3] - bounds[2]
             self.xlim = (self.center[0] - dx/2., self.center[0] + dx/2.)
             self.ylim = (self.center[1] - dy/2., self.center[1] + dy/2.)
-            mylog.info("xlim = %f %f" %self.xlim)
-            mylog.info("ylim = %f %f" %self.ylim)
         else:
-            self.xlim = bounds[0:2]
-            self.ylim = bounds[2:4]
-            
+            self.xlim = tuple(bounds[0:2])
+            self.ylim = tuple(bounds[2:4])
+            if len(bounds) == 6:
+                self.zlim = tuple(bounds[4:6])
+        mylog.info("xlim = %f %f" %self.xlim)
+        mylog.info("ylim = %f %f" %self.ylim)
+        if hasattr(self,'zlim'):
+            mylog.info("zlim = %f %f" %self.zlim)
+
     @invalidate_data
     def set_width(self, width, unit = '1'):
         """set the width of the plot window
@@ -415,14 +411,20 @@
         width = (Wx,Wy)
         width = [w / self.pf[unit] for w in width]
 
-        centerx = (self.xlim[1] + self.xlim[0])/2 
-        centery = (self.ylim[1] + self.ylim[0])/2 
+        centerx = (self.xlim[1] + self.xlim[0])/2.
+        centery = (self.ylim[1] + self.ylim[0])/2. 
         
         self.xlim = (centerx - width[0]/2.,
                      centerx + width[0]/2.)
         self.ylim = (centery - width[1]/2.,
                      centery + width[1]/2.)
         
+        if hasattr(self,'zlim'):
+            centerz = (self.zlim[1] + self.zlim[0])/2.
+            mw = max(width)
+            self.zlim = (centerz - mw/2.,
+                         centerz + mw/2.)
+        
     @invalidate_data
     def set_center(self, new_center, unit = '1'):
         """Sets a new center for the plot window
@@ -753,8 +755,8 @@
                 labels = [r'$\rm{'+axis_labels[axis_index][i]+
                         axes_unit_label + r'}$' for i in (0,1)]
             else:
-                labels = [r'$\rm{x'+axes_unit_label+'}$',
-                          r'$\rm{y'+axes_unit_label+'}$']
+                labels = [r'$\rm{Image\/x'+axes_unit_label+'}$',
+                          r'$\rm{Image\/y'+axes_unit_label+'}$']
 
             self.plots[f].axes.set_xlabel(labels[0])
             self.plots[f].axes.set_ylabel(labels[1])
@@ -850,16 +852,9 @@
         if mpl_kwargs is None: mpl_kwargs = {}
         axis = axis_names[self.data_source.axis]
         weight = None
-        if 'Slice' in self.data_source.__class__.__name__:
-            type = 'Slice'
-        if 'Proj' in self.data_source.__class__.__name__:
-            if 'OffAxis' in self.data_source.__class__.__name__:
-                type = 'OffAxisProjection'
-            else:
-                type = 'Projection'
+        type = self._plot_type
+        if type in ['Projection','OffAxisProjection']:
             weight = self.data_source.weight_field
-        if 'Cutting' in self.data_source.__class__.__name__:
-            type = 'OffAxisSlice'
         names = []
         for k, v in self.plots.iteritems():
             if axis:
@@ -907,6 +902,9 @@
             raise YTNotInsideNotebook
 
 class SlicePlot(PWViewerMPL):
+    _plot_type = 'Slice'
+    _frb_generator = FixedResolutionBuffer
+
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
                  origin='center-window'):
         r"""Creates a slice plot from a parameter file
@@ -982,6 +980,9 @@
         self.set_axes_unit(axes_unit)
 
 class ProjectionPlot(PWViewerMPL):
+    _plot_type = 'Projection'
+    _frb_generator = FixedResolutionBuffer
+
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
                  weight_field=None, max_level=None, origin='center-window'):
         r"""Creates a projection plot from a parameter file
@@ -1061,6 +1062,9 @@
         self.set_axes_unit(axes_unit)
 
 class OffAxisSlicePlot(PWViewerMPL):
+    _plot_type = 'OffAxisSlice'
+    _frb_generator = ObliqueFixedResolutionBuffer
+
     def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
                  axes_unit=None, north_vector=None):
         r"""Creates an off axis slice plot from a parameter file
@@ -1108,32 +1112,10 @@
         PWViewerMPL.__init__(self,cutting,bounds,origin='center-window',periodic=False,oblique=True)
         self.set_axes_unit(axes_unit)
 
-class OffAxisProjectionDummyFRB(FixedResolutionBuffer):
-    def __init__(self, data_source, bounds, buff_size, antialias = True,                                                         
-                 periodic = False):
-        self.internal_dict = {}
-        FixedResolutionBuffer.__init__(self, data_source, bounds, buff_size, antialias, periodic)
-
-    def __getitem__(self, item):
-        try:
-            image = self.internal_dict[item]
-        except KeyError:
-            ds = self.data_source
-            image = off_axis_projection(ds.pf, ds.center, ds.normal_vector,
-                                        ds.width, ds.resolution, item,
-                                        weight=ds.weight_field, volume=ds.volume,
-                                        no_ghost=ds.no_ghost, interpolated=ds.interpolated,
-                                        north_vector=ds.north_vector)
-            self.internal_dict[item] = image.T
-        return image
-    
-    def _get_data_source_fields(self):
-        for f in self.data_source.fields:
-            self[f] = None
-
 class OffAxisProjectionDummyDataSource(object):
     _type_name = 'proj'
     proj_style = 'integrate'
+    _key_fields = []
     def __init__(self, center, pf, normal_vector, width, fields, 
                  interpolated, resolution = (800,800), weight=None,  
                  volume=None, no_ghost=False, le=None, re=None, 
@@ -1154,6 +1136,9 @@
         self.north_vector = north_vector
 
 class OffAxisProjectionPlot(PWViewerMPL):
+    _plot_type = 'OffAxisProjection'
+    _frb_generator = OffAxisProjectionFixedResolutionBuffer
+
     def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
                  depth=(1,'unitary'), axes_unit=None, weight_field=None, 
                  max_level=None, north_vector=None, volume=None, no_ghost=False, 
@@ -1204,7 +1189,6 @@
             set, an arbitrary grid-aligned north-vector is chosen.
 
         """
-        self.OffAxisProjection = True
         (bounds,center_rot) = GetOffAxisBoundsAndCenter(normal,center,width,pf,depth=depth)
         # Hard-coding the resolution for now
         fields = ensure_list(fields)[:]
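
With the refactor above, the class-name string matching is gone: each plot
type declares its own pixelizer and type label, so a hypothetical new
window class would only need two attributes, sketched as:

    from yt.visualization.plot_window import PWViewerMPL
    from yt.visualization.fixed_resolution import \
        ObliqueFixedResolutionBuffer

    class MyObliquePlot(PWViewerMPL):  # hypothetical subclass
        _plot_type = 'OffAxisSlice'    # used when naming saved images
        _frb_generator = ObliqueFixedResolutionBuffer  # used to repixelize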


diff -r d5228350c51505e38a21defe051d28ca8a6e66d0 -r ae18bc654a0282f8a9bf1c550d8d8091d0774222 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -37,6 +37,7 @@
     arr_ang2pix_nest, arr_fisheye_vectors
 from yt.utilities.math_utils import get_rotation_matrix
 from yt.utilities.orientation import Orientation
+from yt.data_objects.api import ImageArray
 from yt.visualization.image_writer import write_bitmap, write_image
 from yt.data_objects.data_containers import data_object_registry
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
@@ -301,7 +302,11 @@
                 np.array(self.width), self.transfer_function, self.sub_samples)
         return args
 
+    star_trees = None
     def get_sampler(self, args):
+        kwargs = {}
+        if self.star_trees is not None:
+            kwargs = {'star_list': self.star_trees}
         if self.use_light:
             if self.light_dir is None:
                 self.set_default_light_dir()
@@ -312,9 +317,10 @@
             if self.light_rgba is None:
                 self.set_default_light_rgba()
             sampler = LightSourceRenderSampler(*args, light_dir=temp_dir,
-                    light_rgba=self.light_rgba)
+                    light_rgba=self.light_rgba, **kwargs)
         else:
-            sampler = self._sampler_object(*args)
+            sampler = self._sampler_object(*args, **kwargs)
+        print sampler, kwargs
         return sampler
 
     def finalize_image(self, image):
@@ -342,15 +348,21 @@
 
     def save_image(self, fn, clip_ratio, image):
         if self.comm.rank == 0 and fn is not None:
-            if clip_ratio is not None:
-                write_bitmap(image, fn, clip_ratio * image.std())
-            else:
-                write_bitmap(image, fn)
-
+            image.write_png(fn, clip_ratio=clip_ratio)
 
     def initialize_source(self):
         return self.volume.initialize_source()
 
+    def get_information(self):
+        info_dict = {'fields':self.fields,
+                     'type':self.__class__.__name__,
+                     'east_vector':self.orienter.unit_vectors[0],
+                     'north_vector':self.orienter.unit_vectors[1],
+                     'normal_vector':self.orienter.unit_vectors[2],
+                     'width':self.width,
+                     'dataset':self.pf.fullpath}
+        return info_dict
+
     def snapshot(self, fn = None, clip_ratio = None, double_check = False,
                  num_threads = 0):
         r"""Ray-cast the camera.
@@ -385,7 +397,9 @@
         args = self.get_sampler_args(image)
         sampler = self.get_sampler(args)
         self.initialize_source()
-        image = self._render(double_check, num_threads, image, sampler)
+        image = ImageArray(self._render(double_check, num_threads, 
+                                        image, sampler),
+                           info=self.get_information())
         self.save_image(fn, clip_ratio, image)
         return image
 
@@ -665,7 +679,7 @@
 class PerspectiveCamera(Camera):
     expand_factor = 1.0
     def __init__(self, *args, **kwargs):
-        expand_factor = kwargs.pop('expand_factor', 1.0)
+        self.expand_factor = kwargs.pop('expand_factor', 1.0)
         Camera.__init__(self, *args, **kwargs)
 
     def get_sampler_args(self, image):
@@ -704,6 +718,27 @@
                 self.transfer_function, self.sub_samples)
         return args
 
+    def _render(self, double_check, num_threads, image, sampler):
+        pbar = get_pbar("Ray casting", (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
+        total_cells = 0
+        if double_check:
+            for brick in self.volume.bricks:
+                for data in brick.my_data:
+                    if np.any(np.isnan(data)):
+                        raise RuntimeError
+
+        view_pos = self.front_center
+        for brick in self.volume.traverse(view_pos, self.front_center, image):
+            sampler(brick, num_threads=num_threads)
+            total_cells += np.prod(brick.my_data[0].shape)
+            pbar.update(total_cells)
+
+        pbar.finish()
+        image = sampler.aimage
+        self.finalize_image(image)
+        return image
+
+
     def finalize_image(self, image):
         image.shape = self.resolution[0], self.resolution[0], 3
 
@@ -720,6 +755,9 @@
     ], dtype='float64')
 
 class HEALpixCamera(Camera):
+
+    _sampler_object = None 
+    
     def __init__(self, center, radius, nside,
                  transfer_function = None, fields = None,
                  sub_samples = 5, log_fields = None, volume = None,
@@ -733,6 +771,12 @@
         if transfer_function is None:
             transfer_function = ProjectionTransferFunction()
         self.transfer_function = transfer_function
+
+        if isinstance(self.transfer_function, ProjectionTransferFunction):
+            self._sampler_object = ProjectionSampler
+        else:
+            self._sampler_object = VolumeRenderSampler
+
         if fields is None: fields = ["Density"]
         self.fields = fields
         self.sub_samples = sub_samples
@@ -787,6 +831,15 @@
 
         return image
 
+    def get_information(self):
+        info_dict = {'fields':self.fields,
+                     'type':self.__class__.__name__,
+                     'center':self.center,
+                     'radius':self.radius,
+                     'dataset':self.pf.fullpath}
+        return info_dict
+
+
     def snapshot(self, fn = None, clip_ratio = None, double_check = False,
                  num_threads = 0, clim = None, label = None):
         r"""Ray-cast the camera.
@@ -814,7 +867,9 @@
         args = self.get_sampler_args(image)
         sampler = self.get_sampler(args)
         self.volume.initialize_source()
-        image = self._render(double_check, num_threads, image, sampler)
+        image = ImageArray(self._render(double_check, num_threads, 
+                                        image, sampler),
+                           info=self.get_information())
         self.save_image(fn, clim, image, label = label)
         return image
 
@@ -1250,8 +1305,9 @@
 
         if self.image is not None:
             del self.image
+        image = ImageArray(image,
+                           info=self.get_information())
         self.image = image
-       
         return image
 
     def save_image(self, fn, clip_ratio=None):
@@ -1656,7 +1712,9 @@
 
         self.initialize_source()
 
-        image = self._render(double_check, num_threads, image, sampler)
+        image = ImageArray(self._render(double_check, num_threads, 
+                                        image, sampler),
+                           info=self.get_information())
 
         self.save_image(fn, clip_ratio, image)
 
@@ -1666,8 +1724,9 @@
 data_object_registry["projection_camera"] = ProjectionCamera
 
 def off_axis_projection(pf, center, normal_vector, width, resolution,
-                        field, weight=None, volume=None, no_ghost=False, 
-                        interpolated=False, north_vector=None):
+                        field, weight = None, 
+                        volume = None, no_ghost = False, interpolated = False,
+                        north_vector = None):
     r"""Project through a parameter file, off-axis, and return the image plane.
 
     This function will accept the necessary items to integrate through a volume
@@ -1707,14 +1766,10 @@
         accuracy/smoothness in resulting image.  The effects are
         less notable when the transfer function is smooth and
         broad. Default: True
-    interpolated : bool, optional
+    interpolated : bool, optional (default: False)
         If True, the data is first interpolated to vertex-centered data, 
         then tri-linearly interpolated along the ray. Not suggested for 
         quantitative studies.
-    north_vector : array-like, optional
-        A vector defining the 'up' direction in the plot.  This                                                                  
-        option sets the orientation of the slicing plane.  If not                                                                
-        set, an arbitrary grid-aligned north-vector is chosen.              
 
     Returns
     -------
@@ -1731,7 +1786,7 @@
     """
     projcam = ProjectionCamera(center, normal_vector, width, resolution,
                                field, weight=weight, pf=pf, volume=volume,
-                               no_ghost=no_ghost, interpolated=interpolated,
+                               no_ghost=no_ghost, interpolated=interpolated, 
                                north_vector=north_vector)
     image = projcam.snapshot()
     if weight is not None:
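
A short sketch of what the ImageArray change buys at the call site: snapshot()
now returns an array whose .info dict carries the metadata assembled by
get_information().  The dataset path and transfer-function bounds below are
illustrative only:

    from yt.mods import load, ColorTransferFunction

    pf = load("DD0010/DD0010")                  # placeholder dataset
    tf = ColorTransferFunction((-30, -22))      # example bounds
    cam = pf.h.camera([0.5, 0.5, 0.5], [1.0, 1.0, 1.0], 0.3, 256, tf)
    im = cam.snapshot("render.png", clip_ratio=8.0)
    fields = im.info["fields"]                  # ["Density"] by default
    north = im.info["north_vector"]             # from the camera orienter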



https://bitbucket.org/yt_analysis/yt-3.0/changeset/923899f7509b/
changeset:   923899f7509b
branch:      yt
user:        jzuhone
date:        2012-10-10 20:53:13
summary:     Updates to the Athena frontend for compatibility with 2D datasets and magnetic fields.

The FLASH frontend was missing the proton mass constant for a field definition.
affected #:  3 files
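
The unit handling at the heart of the Athena field changes below, as a
standalone sketch (constant values are approximate cgs numbers, included only
to make the snippet self-contained):

    # Athena stores B in code units where B_cgs = sqrt(4*pi) * B_code, and
    # temperature follows from the ideal gas law: T = mu * m_H * P / (rho * k_B).
    import numpy as np

    kboltz = 1.3806488e-16    # Boltzmann constant, erg/K
    mh = 1.6726e-24           # hydrogen mass, g

    def b_gauss(b_code):
        return np.sqrt(4.0 * np.pi) * b_code

    def temperature(pressure, density, mu=0.6):
        return mu * mh * pressure / (density * kboltz)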

diff -r ae18bc654a0282f8a9bf1c550d8d8091d0774222 -r 923899f7509b566ba4193bfea7a5deb918e630fd yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -340,7 +340,6 @@
         self.boundary_conditions = [1]*6
 
         ND = self.dimensionality
-        print ND
         self.nvtk = int(np.product(self.domain_dimensions[:ND]/(grid['dimensions'][:ND]-1)))
 
         self.current_redshift = self.omega_lambda = self.omega_matter = \


diff -r ae18bc654a0282f8a9bf1c550d8d8091d0774222 -r 923899f7509b566ba4193bfea7a5deb918e630fd yt/frontends/athena/fields.py
--- a/yt/frontends/athena/fields.py
+++ b/yt/frontends/athena/fields.py
@@ -26,6 +26,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+import numpy as np
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, \
     FieldInfo, \
@@ -36,6 +37,8 @@
     ValidateGridType, \
     NullFunc, \
     TranslationFunc
+from yt.utilities.physical_constants import \
+    kboltz,mh
 import yt.data_objects.universal_fields
 
 log_translation_dict = {}
@@ -44,10 +47,7 @@
                     "Pressure": "pressure",
                     "x-velocity": "velocity_x",
                     "y-velocity": "velocity_y",
-                    "z-velocity": "velocity_z",
-                    "mag_field_x": "cell_centered_B_x ",
-                    "mag_field_y": "cell_centered_B_y ",
-                    "mag_field_z": "cell_centered_B_z "}
+                    "z-velocity": "velocity_z"}
 
 AthenaFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
 add_field = AthenaFieldInfo.add_field
@@ -86,3 +86,30 @@
 for f,v in translation_dict.items():
     add_field(f, TranslationFunc(v), take_log=False)
 
+def _Temperature(fields, data):
+    if data.has_field_parameter("mu") :
+        mu = data.get_field_parameter("mu")
+    else:
+        mu = 0.6
+    return mu*mh*data["Pressure"]/data["Density"]/kboltz
+add_field("Temperature", function=_Temperature, take_log=False,
+          units=r"\rm{K}")
+
+def _Bx(fields, data):
+    factor = np.sqrt(4.*np.pi)
+    return data['cell_centered_B_x']*factor
+add_field("Bx", function=_Bx, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_x")
+
+def _By(fields, data):
+    factor = np.sqrt(4.*np.pi)
+    return data['cell_centered_B_y']*factor
+add_field("By", function=_By, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_y")
+
+def _Bz(fields, data):
+    factor = np.sqrt(4.*np.pi)
+    return data['cell_centered_B_z']*factor
+add_field("Bz", function=_Bz, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_z")
+


diff -r ae18bc654a0282f8a9bf1c550d8d8091d0774222 -r 923899f7509b566ba4193bfea7a5deb918e630fd yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -36,7 +36,7 @@
     ValidateGridType
 import yt.data_objects.universal_fields
 from yt.utilities.physical_constants import \
-    kboltz
+    kboltz, mh
 KnownFLASHFields = FieldInfoContainer()
 add_flash_field = KnownFLASHFields.add_field
 



https://bitbucket.org/yt_analysis/yt-3.0/changeset/5c39b3425b47/
changeset:   5c39b3425b47
branch:      yt
user:        jzuhone
date:        2012-11-09 22:21:01
summary:     Merging
affected #:  64 files

diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,3 +1,3 @@
-include distribute_setup.py
+include distribute_setup.py README* CREDITS FUNDING LICENSE.txt
 recursive-include yt/gui/reason/html *.html *.png *.ico *.js
-recursive-include yt *.pyx *.pxd *.hh *.h README* 
+recursive-include yt *.pyx *.pxd *.hh *.h README*


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -220,11 +220,24 @@
         echo "  * libncurses5-dev"
         echo "  * zip"
         echo "  * uuid-dev"
+        echo "  * libfreetype6-dev"
+        echo "  * tk-dev"
         echo
         echo "You can accomplish this by executing:"
         echo
-        echo "$ sudo apt-get install libssl-dev build-essential libncurses5 libncurses5-dev zip uuid-dev"
+        echo "$ sudo apt-get install libssl-dev build-essential libncurses5 libncurses5-dev zip uuid-dev libfreetype6-dev tk-dev"
         echo
+        echo
+        echo " Additionally, if you want to put yt's lib dir in your LD_LIBRARY_PATH"
+        echo " so you can use yt without the activate script, you might "
+        echo " want to consider turning off LIBZ and FREETYPE in this"
+        echo " install script by editing this file and setting"
+        echo
+        echo " INST_ZLIB=0"
+        echo " INST_FTYPE=0"
+        echo 
+        echo " to avoid conflicts with other command-line programs "
+        echo " (like eog and evince, for example)."
     fi
     if [ ! -z "${CFLAGS}" ]
     then


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 nose.cfg
--- /dev/null
+++ b/nose.cfg
@@ -0,0 +1,4 @@
+[nosetests]
+detailed-errors=1
+where=yt
+exclude=answer_testing


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,3 +1,9 @@
 [egg_info]
 #tag_build = .dev
 #tag_svn_revision = 1
+
+[nosetests]
+detailed-errors=1
+where=yt
+exclude=answer_testing
+with-xunit=1


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 setup.py
--- a/setup.py
+++ b/setup.py
@@ -154,7 +154,11 @@
             'amr adaptivemeshrefinement',
         entry_points={'console_scripts': [
                             'yt = yt.utilities.command_line:run_main',
-                       ]},
+                      ],
+                      'nose.plugins.0.10': [
+                            'answer-testing = yt.utilities.answer_testing.framework:AnswerTesting'
+                      ]
+        },
         author="Matthew J. Turk",
         author_email="matthewturk at gmail.com",
         url="http://yt-project.org/",


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
@@ -31,7 +31,7 @@
 from yt.funcs import *
 from yt.utilities.performance_counters import yt_counters, time_function
 try:
-    from yt.utilities.kdtree import \
+    from yt.utilities.kdtree.api import \
         chainHOP_tags_dens, \
         create_tree, fKD, find_nn_nearest_neighbors, \
         free_tree, find_chunk_nearest_neighbors


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -606,6 +606,7 @@
 
         if newProfile:
             mylog.info("Writing halo %d" % halo['id'])
+            if os.path.exists(filename): os.remove(filename)
             if filename.endswith('.h5'):
                 profile.write_out_h5(filename)
             else:
@@ -717,7 +718,9 @@
             Default=True.
         njobs : int
             The number of jobs over which to split the projections.  Set
-            to -1 so that each halo is done by a single processor.
+            to -1 so that each halo is done by a single processor.  Halo 
+            projections do not currently work in parallel, so this must 
+            be set to -1.
             Default: -1.
         dynamic : bool
             If True, distribute halos using a task queue.  If False,
@@ -731,6 +734,12 @@
 
         """
 
+        # Halo projections cannot run in parallel because they are done by 
+        # giving a data source to the projection object.
+        if njobs > 0:
+            mylog.warn("Halo projections cannot use more than one processor per halo, setting njobs to -1.")
+            njobs = -1
+        
         # Get list of halos for projecting.
         if halo_list == 'filtered':
             halo_projection_list = self.filtered_halos


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/analysis_modules/two_point_functions/two_point_functions.py
--- a/yt/analysis_modules/two_point_functions/two_point_functions.py
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py
@@ -30,7 +30,7 @@
 from yt.utilities.parallel_tools.parallel_analysis_interface import ParallelAnalysisInterface, parallel_blocking_call, parallel_root_only
 
 try:
-    from yt.utilities.kdtree import *
+    from yt.utilities.kdtree.api import *
 except ImportError:
     mylog.debug("The Fortran kD-Tree did not import correctly.")
 


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -38,6 +38,7 @@
     inline = 'False',
     numthreads = '-1',
     __withinreason = 'False',
+    __withintesting = 'False',
     __parallel = 'False',
     __global_parallel_rank = '0',
     __global_parallel_size = '1',
@@ -53,6 +54,7 @@
     pasteboard_repo = '',
     reconstruct_hierarchy = 'False',
     test_storage_dir = '/does/not/exist',
+    test_data_dir = '/does/not/exist',
     enzo_db = '',
     hub_url = 'https://hub.yt-project.org/upload',
     hub_api_key = '',


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -84,6 +84,7 @@
     ValidateSpatial, \
     ValidateGridType, \
     add_field, \
+    add_grad, \
     derived_field
 
 from particle_trajectories import \


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -5,7 +5,7 @@
 Affiliation: KIPAC/SLAC/Stanford
 Author: Britton Smith <Britton.Smith at colorado.edu>
 Affiliation: University of Colorado at Boulder
-Author: Geoffrey So <gsiisg at gmail.com> (AMREllipsoidBase)
+Author: Geoffrey So <gsiisg at gmail.com>
 Affiliation: UCSD Physics/CASS
 Homepage: http://yt-project.org/
 License:
@@ -71,7 +71,7 @@
 def force_array(item, shape):
     try:
         sh = item.shape
-        return item
+        return item.copy()
     except AttributeError:
         if item:
             return np.ones(shape, dtype='bool')
@@ -237,6 +237,7 @@
     def __set_default_field_parameters(self):
         self.set_field_parameter("center",np.zeros(3,dtype='float64'))
         self.set_field_parameter("bulk_velocity",np.zeros(3,dtype='float64'))
+        self.set_field_parameter("normal",np.array([0,0,1],dtype='float64'))
 
     def _set_center(self, center):
         if center is None:
@@ -708,7 +709,7 @@
     _type_name = "streamline"
     _con_args = ('positions')
     sort_by = 't'
-    def __init__(self, positions, fields=None, pf=None, **kwargs):
+    def __init__(self, positions, length = 1.0, fields=None, pf=None, **kwargs):
         """
         This is a streamline, which is a set of points defined as
         being parallel to some vector field.
@@ -724,6 +725,8 @@
         ----------
         positions : array-like
             List of streamline positions
+        length : float
+            The magnitude of the distance; dts will be divided by this
         fields : list of strings, optional
             If you want the object to pre-retrieve a set of fields, supply them
             here.  This is not necessary.
@@ -748,7 +751,9 @@
         self.dts = np.empty_like(positions[:,0])
         self.dts[:-1] = np.sqrt(np.sum((self.positions[1:]-
                                         self.positions[:-1])**2,axis=1))
-        self.dts[-1] = self.dts[-1]
+        self.dts[-1] = self.dts[-2]
+        self.length = length
+        self.dts /= length
         self.ts = np.add.accumulate(self.dts)
         self._set_center(self.positions[0])
         self.set_field_parameter('center', self.positions[0])
@@ -767,31 +772,30 @@
 
     @restore_grid_state
     def _get_data_from_grid(self, grid, field):
-        mask = np.logical_and(self._get_cut_mask(grid),
-                              grid.child_mask)
-        if field == 'dts': return self._dts[grid.id][mask]
-        if field == 't': return self._ts[grid.id][mask]
-        return grid[field][mask]
+        # No child masking here; it happens inside the mask cut
+        mask = self._get_cut_mask(grid) 
+        if field == 'dts': return self._dts[grid.id]
+        if field == 't': return self._ts[grid.id]
+        return grid[field].flat[mask]
         
     @cache_mask
     def _get_cut_mask(self, grid):
-        mask = np.zeros(grid.ActiveDimensions, dtype='int')
-        dts = np.zeros(grid.ActiveDimensions, dtype='float64')
-        ts = np.zeros(grid.ActiveDimensions, dtype='float64')
         #pdb.set_trace()
         points_in_grid = np.all(self.positions > grid.LeftEdge, axis=1) & \
                          np.all(self.positions <= grid.RightEdge, axis=1) 
         pids = np.where(points_in_grid)[0]
-        for i, pos in zip(pids, self.positions[points_in_grid]):
+        mask = np.zeros(points_in_grid.sum(), dtype='int')
+        dts = np.zeros(points_in_grid.sum(), dtype='float64')
+        ts = np.zeros(points_in_grid.sum(), dtype='float64')
+        for mi, (i, pos) in enumerate(zip(pids, self.positions[points_in_grid])):
             if not points_in_grid[i]: continue
             ci = ((pos - grid.LeftEdge)/grid.dds).astype('int')
+            if grid.child_mask[ci[0], ci[1], ci[2]] == 0: continue
             for j in range(3):
                 ci[j] = min(ci[j], grid.ActiveDimensions[j]-1)
-            if mask[ci[0], ci[1], ci[2]]:
-                continue
-            mask[ci[0], ci[1], ci[2]] = 1
-            dts[ci[0], ci[1], ci[2]] = self.dts[i]
-            ts[ci[0], ci[1], ci[2]] = self.ts[i]
+            mask[mi] = np.ravel_multi_index(ci, grid.ActiveDimensions)
+            dts[mi] = self.dts[i]
+            ts[mi] = self.ts[i]
         self._dts[grid.id] = dts
         self._ts[grid.id] = ts
         return mask
@@ -855,6 +859,22 @@
         for field in temp_data.keys():
             self[field] = temp_data[field]
 
+    def _get_pw(self, fields, center, width, origin, axes_unit, plot_type):
+        axis = self.axis
+        if fields == None:
+            if self.fields == None:
+                raise SyntaxError("The fields keyword argument must be set")
+        else:
+            self.fields = ensure_list(fields)
+        from yt.visualization.plot_window import \
+            GetBoundsAndCenter, PWViewerMPL
+        from yt.visualization.fixed_resolution import FixedResolutionBuffer
+        (bounds, center) = GetBoundsAndCenter(axis, center, width, self.pf)
+        pw = PWViewerMPL(self, bounds, origin=origin, frb_generator=FixedResolutionBuffer, 
+                         plot_type=plot_type)
+        pw.set_axes_unit(axes_unit)
+        return pw
+
     def to_frb(self, width, resolution, center=None, height=None):
         r"""This function returns a FixedResolutionBuffer generated from this
         object.
@@ -916,26 +936,6 @@
         frb = FixedResolutionBuffer(self, bounds, resolution)
         return frb
 
-    def to_pw(self):
-        r"""Create a :class:`~yt.visualization.plot_window.PlotWindow` from this
-        object.
-
-        This is a bare-bones mechanism of creating a plot window from this
-        object, which can then be moved around, zoomed, and on and on.  All
-        behavior of the plot window is relegated to that routine.
-        """
-        axis = self.axis
-        center = self.get_field_parameter("center")
-        if center is None:
-            center = (self.pf.domain_right_edge
-                    + self.pf.domain_left_edge)/2.0
-        width = (1.0, 'unitary')
-        from yt.visualization.plot_window import \
-            PWViewerMPL, GetBoundsAndCenter
-        (bounds, center) = GetBoundsAndCenter(axis, center, width, self.pf)
-        pw = PWViewerMPL(self, bounds)
-        return pw
-
     def interpolate_discretize(self, LE, RE, field, side, log_spacing=True):
         """
         This returns a uniform grid of points between *LE* and *RE*,
@@ -1193,6 +1193,18 @@
     def hub_upload(self):
         self._mrep.upload()
 
+    def to_pw(self, fields=None, center='c', width=None, axes_unit=None, 
+               origin='center-window'):
+        r"""Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this
+        object.
+
+        This is a bare-bones mechanism of creating a plot window from this
+        object, which can then be moved around, zoomed, and on and on.  All
+        behavior of the plot window is relegated to that routine.
+        """
+        pw = self._get_pw(fields, center, width, origin, axes_unit, 'Slice')
+        return pw
+
 class AMRCuttingPlaneBase(AMR2DData):
     _plane = None
     _top_node = "/CuttingPlanes"
@@ -1355,6 +1367,30 @@
         return "%s/c%s_L%s" % \
             (self._top_node, cen_name, L_name)
 
+    def to_pw(self, fields=None, center='c', width=None, axes_unit=None):
+        r"""Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this
+        object.
+
+        This is a bare-bones mechanism of creating a plot window from this
+        object, which can then be moved around, zoomed, and on and on.  All
+        behavior of the plot window is relegated to that routine.
+        """
+        normal = self.normal
+        center = self.center
+        if fields == None:
+            if self.fields == None:
+                raise SyntaxError("The fields keyword argument must be set")
+        else:
+            self.fields = ensure_list(fields)
+        from yt.visualization.plot_window import \
+            GetOffAxisBoundsAndCenter, PWViewerMPL
+        from yt.visualization.fixed_resolution import ObliqueFixedResolutionBuffer
+        (bounds, center_rot) = GetOffAxisBoundsAndCenter(normal, center, width, self.pf)
+        pw = PWViewerMPL(self, bounds, origin='center-window', periodic=False, oblique=True,
+                         frb_generator=ObliqueFixedResolutionBuffer, plot_type='OffAxisSlice')
+        pw.set_axes_unit(axes_unit)
+        return pw
+
     def to_frb(self, width, resolution, height=None):
         r"""This function returns an ObliqueFixedResolutionBuffer generated
         from this object.
@@ -1762,6 +1798,18 @@
             convs[:] = 1.0
         return dls, convs
 
+    def to_pw(self, fields=None, center='c', width=None, axes_unit=None, 
+               origin='center-window'):
+        r"""Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this
+        object.
+
+        This is a bare-bones mechanism of creating a plot window from this
+        object, which can then be moved around, zoomed, and on and on.  All
+        behavior of the plot window is relegated to that routine.
+        """
+        pw = self._get_pw(fields, center, width, origin, axes_unit, 'Projection')
+        return pw
+
     def get_data(self, fields = None):
         if fields is None: fields = ensure_list(self.fields)[:]
         else: fields = ensure_list(fields)
@@ -2254,6 +2302,18 @@
     def add_fields(self, fields, weight = "CellMassMsun"):
         pass
 
+    def to_pw(self, fields=None, center='c', width=None, axes_unit=None, 
+               origin='center-window'):
+        r"""Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this
+        object.
+
+        This is a bare-bones mechanism of creating a plot window from this
+        object, which can then be moved around, zoomed, and on and on.  All
+        behavior of the plot window is relegated to that routine.
+        """
+        pw = self._get_pw(fields, center, width, origin, axes_unit, 'Projection')
+        return pw
+
     def _project_grid(self, grid, fields, zero_out):
         # We split this next bit into two sections to try to limit the IO load
         # on the system.  This way, we preserve grid state (@restore_grid_state
@@ -3445,10 +3505,7 @@
         for gi, g in enumerate(grids): self._grids[gi] = g
 
     def _is_fully_enclosed(self, grid):
-        r = np.abs(grid._corners - self.center)
-        r = np.minimum(r, np.abs(self.DW[None,:]-r))
-        corner_radius = np.sqrt((r**2.0).sum(axis=1))
-        return np.all(corner_radius <= self.radius)
+        return False
 
     @restore_grid_state # Pains me not to decorate with cache_mask here
     def _get_cut_mask(self, grid, field=None):
@@ -3474,17 +3531,45 @@
                  pf=None, **kwargs):
         """
         By providing a *center*,*A*,*B*,*C*,*e0*,*tilt* we
-        can define a ellipsoid of any proportion.  Only cells whose centers are
-        within the ellipsoid will be selected.
+        can define an ellipsoid of any proportion.  Only cells whose
+        centers are within the ellipsoid will be selected.
+
+        Parameters
+        ----------
+        center : array_like
+            The center of the ellipsoid.
+        A : float
+            The magnitude of the largest semi-major axis of the ellipsoid.
+        B : float
+            The magnitude of the medium semi-major axis of the ellipsoid.
+        C : float
+            The magnitude of the smallest semi-major axis of the ellipsoid.
+        e0 : array_like (automatically normalized)
+            The direction of the largest semi-major axis of the ellipsoid.
+        tilt : float
+            After the rotation about the z-axis to align e0 to x in the x-y
+            plane, and then rotating about the y-axis to align e0 completely
+            to the x-axis, tilt is the angle in radians remaining to
+            rotate about the x-axis to align both e1 to the y-axis and e2 to
+            the z-axis.
+
+        Examples
+        --------
+        >>> pf = load("DD####/DD####")
+        >>> c = [0.5,0.5,0.5]
+        >>> ell = pf.h.ellipsoid(c, 0.1, 0.1, 0.1, np.array([0.1, 0.1, 0.1]), 0.2)
         """
+
         AMR3DData.__init__(self, np.array(center), fields, pf, **kwargs)
+        # make sure the magnitudes of semi-major axes are in order
+        if A<B or B<C:
+            raise YTEllipsoidOrdering(pf, A, B, C)
         # make sure the smallest side is not smaller than dx
         if C < self.hierarchy.get_smallest_dx():
             raise YTSphereTooSmall(pf, C, self.hierarchy.get_smallest_dx())
         self._A = A
         self._B = B
         self._C = C
-        self._e0 = e0
+        self._e0 = e0 = e0 / (e0**2.0).sum()**0.5
         self._tilt = tilt
         
         # find the t1 angle needed to rotate about z axis to align e0 to x
@@ -3602,7 +3687,7 @@
 class AMRCoveringGridBase(AMR3DData):
     _spatial = True
     _type_name = "covering_grid"
-    _con_args = ('level', 'left_edge', 'right_edge', 'ActiveDimensions')
+    _con_args = ('level', 'left_edge', 'ActiveDimensions')
     def __init__(self, level, left_edge, dims, fields = None,
                  pf = None, num_ghost_zones = 0, use_pbar = True, **kwargs):
         """A 3D region with all data extracted to a single, specified
@@ -3629,8 +3714,9 @@
                            fields=fields, pf=pf, **kwargs)
         self.left_edge = np.array(left_edge)
         self.level = level
-        self.dds = self.pf.h.select_grids(self.level)[0].dds.copy()
-        self.ActiveDimensions = np.array(dims,dtype='int32')
+        rdx = self.pf.domain_dimensions*self.pf.refine_by**level
+        self.dds = self.pf.domain_width/rdx.astype("float64")
+        self.ActiveDimensions = np.array(dims, dtype='int32')
         self.right_edge = self.left_edge + self.ActiveDimensions*self.dds
         self._num_ghost_zones = num_ghost_zones
         self._use_pbar = use_pbar
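
A minimal usage sketch of the to_pw() refactor in this diff, which lets slices,
cutting planes, and projections hand themselves to PWViewerMPL; the dataset
path is a placeholder:

    from yt.mods import load

    pf = load("DD0010/DD0010")                  # placeholder dataset
    slc = pf.h.slice(0, 0.5, fields=["Density"])
    pw = slc.to_pw(width=(0.5, 'unitary'))      # returns a PWViewerMPL
    pw.save()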


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -598,16 +598,16 @@
                     continue
             else:
                 nz_filter = None
-            mins.append(data[field][nz_filter].min())
-            maxs.append(data[field][nz_filter].max())
+            mins.append(np.nanmin(data[field][nz_filter]))
+            maxs.append(np.nanmax(data[field][nz_filter]))
         else:
             if this_filter.any():
                 if non_zero:
                     nz_filter = ((this_filter) &
                                  (data[field][this_filter] > 0.0))
                 else: nz_filter = this_filter
-                mins.append(data[field][nz_filter].min())
-                maxs.append(data[field][nz_filter].max())
+                mins.append(np.nanmin(data[field][nz_filter]))
+                maxs.append(np.nanmax(data[field][nz_filter]))
             else:
                 mins.append(1e90)
                 maxs.append(-1e90)


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -58,6 +58,66 @@
                 return function
             return create_function
         self[name] = DerivedField(name, function, **kwargs)
+        
+    def add_grad(self, field, **kwargs):
+        """
+        Creates the partial derivative of a given field. This function will
+        autogenerate the names of the gradient fields.
+
+        """
+        sl = slice(2,None,None)
+        sr = slice(None,-2,None)
+        
+        def _gradx(f, data):
+            grad = data[field][sl,1:-1,1:-1] - data[field][sr,1:-1,1:-1]
+            grad /= 2.0*data["dx"].flat[0]
+            g = np.zeros(data[field].shape, dtype='float64')
+            g[1:-1,1:-1,1:-1] = grad
+            return g
+            
+        def _grady(f, data):
+            grad = data[field][1:-1,sl,1:-1] - data[field][1:-1,sr,1:-1]
+            grad /= 2.0*data["dy"].flat[0]
+            g = np.zeros(data[field].shape, dtype='float64')
+            g[1:-1,1:-1,1:-1] = grad
+            return g
+            
+        def _gradz(f, data):
+            grad = data[field][1:-1,1:-1,sl] - data[field][1:-1,1:-1,sr]
+            grad /= 2.0*data["dz"].flat[0]
+            g = np.zeros(data[field].shape, dtype='float64')
+            g[1:-1,1:-1,1:-1] = grad
+            return g
+        
+        d_kwargs = kwargs.copy()
+        if "display_name" in kwargs: del d_kwargs["display_name"]
+        
+        for ax in "xyz":
+            if "display_name" in kwargs:
+                disp_name = r"%s\_%s" % (kwargs["display_name"], ax)
+            else:
+                disp_name = r"\partial %s/\partial %s" % (field, ax)
+            name = "Grad_%s_%s" % (field, ax)
+            self[name] = DerivedField(name, function=eval('_grad%s' % ax),
+                         take_log=False, validators=[ValidateSpatial(1,[field])],
+                         display_name = disp_name, **d_kwargs)
+        
+        def _grad(f, data) :
+            a = np.power(data["Grad_%s_x" % field],2)
+            b = np.power(data["Grad_%s_y" % field],2)
+            c = np.power(data["Grad_%s_z" % field],2)
+            norm = np.sqrt(a+b+c)
+            return norm
+
+        if "display_name" in kwargs:
+            disp_name = kwargs["display_name"]
+        else:
+            disp_name = r"\Vert\nabla %s\Vert" % (field)   
+        name = "Grad_%s" % field           
+        self[name] = DerivedField(name, function=_grad, take_log=False,
+                                  display_name = disp_name, **d_kwargs)
+        mylog.info("Added new fields: Grad_%s_x, Grad_%s_y, Grad_%s_z, Grad_%s" \
+                   % (field, field, field, field))
 
     def has_key(self, key):
         # This gets used a lot
@@ -96,6 +156,7 @@
 
 FieldInfo = FieldInfoContainer()
 add_field = FieldInfo.add_field
+add_grad = FieldInfo.add_grad
 
 def derived_field(**kwargs):
     def inner_decorator(function):
@@ -160,7 +221,8 @@
             # required attrs
             pf = fake_parameter_file(lambda: 1)
             pf.current_redshift = pf.omega_lambda = pf.omega_matter = \
-                pf.hubble_constant = pf.cosmological_simulation = 0.0
+                pf.cosmological_simulation = 0.0
+            pf.hubble_constant = 0.7
             pf.domain_left_edge = np.zeros(3, 'float64')
             pf.domain_right_edge = np.ones(3, 'float64')
             pf.dimensionality = 3
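
A sketch of what the new add_grad hook registers: for each axis it builds the
central difference g[i] = (f[i+1] - f[i-1]) / (2*dx) into a Grad_<field>_<ax>
field, plus the magnitude field Grad_<field>.  Field and dataset names below
are illustrative:

    from yt.mods import load
    from yt.data_objects.api import add_grad

    add_grad("Density")          # registers Grad_Density_{x,y,z} and Grad_Density
    pf = load("DD0010/DD0010")   # placeholder dataset
    dd = pf.h.all_data()
    gx = dd["Grad_Density_x"]    # d(Density)/dx via central differences
    gmag = dd["Grad_Density"]    # sqrt(gx**2 + gy**2 + gz**2)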


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/data_objects/hierarchy.py
--- a/yt/data_objects/hierarchy.py
+++ b/yt/data_objects/hierarchy.py
@@ -382,17 +382,19 @@
         """
         Prints out (stdout) relevant information about the simulation
         """
-        header = "%3s\t%6s\t%14s" % ("level","# grids", "# cells")
+        header = "%3s\t%6s\t%14s\t%14s" % ("level","# grids", "# cells",
+                                           "# cells^3")
         print header
         print "%s" % (len(header.expandtabs())*"-")
         for level in xrange(MAXLEVEL):
             if (self.level_stats['numgrids'][level]) == 0:
                 break
-            print "% 3i\t% 6i\t% 14i" % \
+            print "% 3i\t% 6i\t% 14i\t% 14i" % \
                   (level, self.level_stats['numgrids'][level],
-                   self.level_stats['numcells'][level])
+                   self.level_stats['numcells'][level],
+                   self.level_stats['numcells'][level]**(1./3))
             dx = self.select_grids(level)[0].dds[0]
-        print "-" * 28
+        print "-" * 46
         print "   \t% 6i\t% 14i" % (self.level_stats['numgrids'].sum(), self.level_stats['numcells'].sum())
         print "\n"
         try:


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/data_objects/tests/test_boolean_regions.py
--- /dev/null
+++ b/yt/data_objects/tests/test_boolean_regions.py
@@ -0,0 +1,353 @@
+from yt.testing import *
+from yt.data_objects.api import add_field
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+    def _ID(field, data):
+        width = data.pf.domain_right_edge - data.pf.domain_left_edge
+        min_dx = 1.0/8192
+        delta = width / min_dx
+        x = data['x'] - min_dx / 2.
+        y = data['y'] - min_dx / 2.
+        z = data['z'] - min_dx / 2.
+        xi = x / min_dx
+        yi = y / min_dx
+        zi = z / min_dx
+        index = xi + delta[0] * (yi + delta[1] * zi)
+        index = index.astype('int64')
+        return index
+
+    add_field("ID", function=_ID)
+
+def test_boolean_spheres_no_overlap():
+    r"""Test to make sure that boolean objects (spheres, no overlap)
+    behave the way we expect.
+
+    Test non-overlapping spheres. This also checks that the original spheres
+    don't change as part of constructing the booleans.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        sp1 = pf.h.sphere([0.25, 0.25, 0.25], 0.15)
+        sp2 = pf.h.sphere([0.75, 0.75, 0.75], 0.15)
+        # Store the original indices
+        i1 = sp1['ID']
+        i1.sort()
+        i2 = sp2['ID']
+        i2.sort()
+        ii = np.concatenate((i1, i2))
+        ii.sort()
+        # Make some booleans
+        bo1 = pf.h.boolean([sp1, "AND", sp2]) # empty
+        bo2 = pf.h.boolean([sp1, "NOT", sp2]) # only sp1
+        bo3 = pf.h.boolean([sp1, "OR", sp2]) # combination
+        # This makes sure the original containers didn't change.
+        new_i1 = sp1['ID']
+        new_i1.sort()
+        new_i2 = sp2['ID']
+        new_i2.sort()
+        yield assert_array_equal, new_i1, i1
+        yield assert_array_equal, new_i2, i2
+        # Now make sure the indices also behave as we expect.
+        empty = np.array([])
+        yield assert_array_equal, bo1['ID'], empty
+        b2 = bo2['ID']
+        b2.sort()
+        yield assert_array_equal, b2, i1
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b3, ii
+ 
+def test_boolean_spheres_overlap():
+    r"""Test to make sure that boolean objects (spheres, overlap)
+    behave the way we expect.
+
+    Test overlapping spheres.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        sp1 = pf.h.sphere([0.45, 0.45, 0.45], 0.15)
+        sp2 = pf.h.sphere([0.55, 0.55, 0.55], 0.15)
+        # Get indices of both.
+        i1 = sp1['ID']
+        i2 = sp2['ID']
+        # Make some booleans
+        bo1 = pf.h.boolean([sp1, "AND", sp2]) # overlap (a lens)
+        bo2 = pf.h.boolean([sp1, "NOT", sp2]) # sp1 - sp2 (sphere with bite)
+        bo3 = pf.h.boolean([sp1, "OR", sp2]) # combination (H2)
+        # Now make sure the indices also behave as we expect.
+        lens = np.intersect1d(i1, i2)
+        apple = np.setdiff1d(i1, i2)
+        both = np.union1d(i1, i2)
+        b1 = bo1['ID']
+        b1.sort()
+        b2 = bo2['ID']
+        b2.sort()
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b1, lens
+        yield assert_array_equal, b2, apple
+        yield assert_array_equal, b3, both
+
+def test_boolean_regions_no_overlap():
+    r"""Test to make sure that boolean objects (regions, no overlap)
+    behave the way we expect.
+
+    Test non-overlapping regions. This also checks that the original regions
+    don't change as part of constructing the booleans.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        re1 = pf.h.region([0.25]*3, [0.2]*3, [0.3]*3)
+        re2 = pf.h.region([0.65]*3, [0.6]*3, [0.7]*3)
+        # Store the original indices
+        i1 = re1['ID']
+        i1.sort()
+        i2 = re2['ID']
+        i2.sort()
+        ii = np.concatenate((i1, i2))
+        ii.sort()
+        # Make some booleans
+        bo1 = pf.h.boolean([re1, "AND", re2]) # empty
+        bo2 = pf.h.boolean([re1, "NOT", re2]) # only re1
+        bo3 = pf.h.boolean([re1, "OR", re2]) # combination
+        # This makes sure the original containers didn't change.
+        new_i1 = re1['ID']
+        new_i1.sort()
+        new_i2 = re2['ID']
+        new_i2.sort()
+        yield assert_array_equal, new_i1, i1
+        yield assert_array_equal, new_i2, i2
+        # Now make sure the indices also behave as we expect.
+        empty = np.array([])
+        yield assert_array_equal, bo1['ID'], empty
+        b2 = bo2['ID']
+        b2.sort()
+        yield assert_array_equal, b2, i1 
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b3, ii
+
+def test_boolean_regions_overlap():
+    r"""Test to make sure that boolean objects (regions, overlap)
+    behave the way we expect.
+
+    Test overlapping regions.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        re1 = pf.h.region([0.55]*3, [0.5]*3, [0.6]*3)
+        re2 = pf.h.region([0.6]*3, [0.55]*3, [0.65]*3)
+        # Get indices of both.
+        i1 = re1['ID']
+        i2 = re2['ID']
+        # Make some booleans
+        bo1 = pf.h.boolean([re1, "AND", re2]) # overlap (small cube)
+        bo2 = pf.h.boolean([re1, "NOT", re2]) # re1 - re2 (large cube with bite)
+        bo3 = pf.h.boolean([re1, "OR", re2]) # combination (merged large cubes)
+        # Now make sure the indices also behave as we expect.
+        cube = np.intersect1d(i1, i2)
+        bite_cube = np.setdiff1d(i1, i2)
+        both = np.union1d(i1, i2)
+        b1 = bo1['ID']
+        b1.sort()
+        b2 = bo2['ID']
+        b2.sort()
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b1, cube
+        yield assert_array_equal, b2, bite_cube
+        yield assert_array_equal, b3, both
+
+def test_boolean_cylinders_no_overlap():
+    r"""Test to make sure that boolean objects (cylinders, no overlap)
+    behave the way we expect.
+
+    Test non-overlapping cylinders. This also checks that the original cylinders
+    don't change as part of constructing the booleans.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        cyl1 = pf.h.disk([0.25]*3, [1, 0, 0], 0.1, 0.1)
+        cyl2 = pf.h.disk([0.75]*3, [1, 0, 0], 0.1, 0.1)
+        # Store the original indices
+        i1 = cyl1['ID']
+        i1.sort()
+        i2 = cyl2['ID']
+        i2.sort()
+        ii = np.concatenate((i1, i2))
+        ii.sort()
+        # Make some booleans
+        bo1 = pf.h.boolean([cyl1, "AND", cyl2]) # empty
+        bo2 = pf.h.boolean([cyl1, "NOT", cyl2]) # only cyl1
+        bo3 = pf.h.boolean([cyl1, "OR", cyl2]) # combination
+        # This makes sure the original containers didn't change.
+        new_i1 = cyl1['ID']
+        new_i1.sort()
+        new_i2 = cyl2['ID']
+        new_i2.sort()
+        yield assert_array_equal, new_i1, i1
+        yield assert_array_equal, new_i2, i2
+        # Now make sure the indices also behave as we expect.
+        empty = np.array([])
+        yield assert_array_equal, bo1['ID'], empty
+        b2 = bo2['ID']
+        b2.sort()
+        yield assert_array_equal, b2, i1
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b3, ii
+
+def test_boolean_cylinders_overlap():
+    r"""Test to make sure that boolean objects (cylinders, overlap)
+    behave the way we expect.
+
+    Test overlapping cylinders.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        cyl1 = pf.h.disk([0.45]*3, [1, 0, 0], 0.2, 0.2)
+        cyl2 = pf.h.disk([0.55]*3, [1, 0, 0], 0.2, 0.2)
+        # Get indices of both.
+        i1 = cyl1['ID']
+        i2 = cyl2['ID']
+        # Make some booleans
+        bo1 = pf.h.boolean([cyl1, "AND", cyl2]) # overlap (vertically extended lens)
+        bo2 = pf.h.boolean([cyl1, "NOT", cyl2]) # cyl1 - cyl2 (disk minus a bite)
+        bo3 = pf.h.boolean([cyl1, "OR", cyl2]) # combination (merged disks)
+        # Now make sure the indices also behave as we expect.
+        vlens = np.intersect1d(i1, i2)
+        bite_disk = np.setdiff1d(i1, i2)
+        both = np.union1d(i1, i2)
+        b1 = bo1['ID']
+        b1.sort()
+        b2 = bo2['ID']
+        b2.sort()
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b1, vlens
+        yield assert_array_equal, b2, bite_disk
+        yield assert_array_equal, b3, both
+
+def test_boolean_ellipsoids_no_overlap():
+    r"""Test to make sure that boolean objects (ellipsoids, no overlap)
+    behave the way we expect.
+
+    Test non-overlapping ellipsoids. This also checks that the original
+    ellipsoids don't change as part of constructing the booleans.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        ell1 = pf.h.ellipsoid([0.25]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
+            np.array([0.1]*3))
+        ell2 = pf.h.ellipsoid([0.75]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
+            np.array([0.1]*3))
+        # Store the original indices
+        i1 = ell1['ID']
+        i1.sort()
+        i2 = ell2['ID']
+        i2.sort()
+        ii = np.concatenate((i1, i2))
+        ii.sort()
+        # Make some booleans
+        bo1 = pf.h.boolean([ell1, "AND", ell2]) # empty
+        bo2 = pf.h.boolean([ell1, "NOT", ell2]) # only ell1
+        bo3 = pf.h.boolean([ell1, "OR", ell2]) # combination
+        # This makes sure the original containers didn't change.
+        new_i1 = ell1['ID']
+        new_i1.sort()
+        new_i2 = ell2['ID']
+        new_i2.sort()
+        yield assert_array_equal, new_i1, i1 
+        yield assert_array_equal, new_i2, i2
+        # Now make sure the indices also behave as we expect.
+        empty = np.array([])
+        yield assert_array_equal, bo1['ID'], empty
+        b2 = bo2['ID']
+        b2.sort()
+        yield assert_array_equal, b2, i1
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b3, ii
+
+def test_boolean_ellipsoids_overlap():
+    r"""Test to make sure that boolean objects (ellipsoids, overlap)
+    behave the way we expect.
+
+    Test overlapping ellipsoids.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        ell1 = pf.h.ellipsoid([0.45]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
+            np.array([0.1]*3))
+        ell2 = pf.h.ellipsoid([0.55]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
+            np.array([0.1]*3))
+        # Get indices of both.
+        i1 = ell1['ID']
+        i2 = ell2['ID']
+        # Make some booleans
+        bo1 = pf.h.boolean([ell1, "AND", ell2]) # overlap
+        bo2 = pf.h.boolean([ell1, "NOT", ell2]) # ell1 - ell2
+        bo3 = pf.h.boolean([ell1, "OR", ell2]) # combination
+        # Now make sure the indices also behave as we expect.
+        overlap = np.intersect1d(i1, i2)
+        diff = np.setdiff1d(i1, i2)
+        both = np.union1d(i1, i2)
+        b1 = bo1['ID']
+        b1.sort()
+        b2 = bo2['ID']
+        b2.sort()
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b1, overlap
+        yield assert_array_equal, b2, diff
+        yield assert_array_equal, b3, both
+
+def test_boolean_mix_periodicity():
+    r"""Test that a hybrid boolean region behaves as we expect.
+
+    This also tests nested logic and that periodicity works.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        re = pf.h.region([0.5]*3, [0.0]*3, [1]*3) # whole thing
+        sp = pf.h.sphere([0.95]*3, 0.3) # wraps around
+        cyl = pf.h.disk([0.05]*3, [1,1,1], 0.1, 0.4) # wraps around
+        # Get original indices
+        rei = re['ID']
+        spi = sp['ID']
+        cyli = cyl['ID']
+        # Make some booleans
+        # whole box minus spherical bites at corners
+        bo1 = pf.h.boolean([re, "NOT", sp])
+        # sphere plus cylinder
+        bo2 = pf.h.boolean([sp, "OR", cyl])
+        # a jumble, the region minus the sp+cyl
+        bo3 = pf.h.boolean([re, "NOT", "(", sp, "OR", cyl, ")"])
+        # Now make sure the indices also behave as we expect.
+        expect = np.setdiff1d(rei, spi)
+        ii = bo1['ID']
+        ii.sort()
+        yield assert_array_equal, expect, ii
+        #
+        expect = np.union1d(spi, cyli)
+        ii = bo2['ID']
+        ii.sort()
+        yield assert_array_equal, expect, ii
+        #
+        expect = np.union1d(spi, cyli)
+        expect = np.setdiff1d(rei, expect)
+        ii = bo3['ID']
+        ii.sort()
+        yield assert_array_equal, expect, ii
+
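
The set identities these tests assert, in isolation: for the per-cell "ID"
arrays of two containers, the boolean object's IDs should match the
corresponding numpy set operation.

    import numpy as np

    i1 = np.array([1, 2, 3, 4])
    i2 = np.array([3, 4, 5])
    assert np.array_equal(np.intersect1d(i1, i2), [3, 4])          # "AND"
    assert np.array_equal(np.setdiff1d(i1, i2), [1, 2])            # "NOT"
    assert np.array_equal(np.union1d(i1, i2), [1, 2, 3, 4, 5])     # "OR"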


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/data_objects/tests/test_covering_grid.py
--- /dev/null
+++ b/yt/data_objects/tests/test_covering_grid.py
@@ -0,0 +1,27 @@
+from yt.testing import *
+from yt.data_objects.profiles import \
+    BinnedProfile1D, BinnedProfile2D, BinnedProfile3D
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_covering_grid():
+    # We decompose in different ways
+    for level in [0, 1, 2]:
+        for nprocs in [1, 2, 4, 8]:
+            pf = fake_random_pf(16, nprocs = nprocs)
+            dn = pf.refine_by**level 
+            cg = pf.h.covering_grid(level, [0.0, 0.0, 0.0],
+                    dn * pf.domain_dimensions)
+            yield assert_equal, cg["Ones"].max(), 1.0
+            yield assert_equal, cg["Ones"].min(), 1.0
+            yield assert_equal, cg["CellVolume"].sum(), pf.domain_width.prod()
+            for g in pf.h.grids:
+                di = g.get_global_startindex()
+                dd = g.ActiveDimensions
+                for i in range(dn):
+                    f = cg["Density"][dn*di[0]+i:dn*(di[0]+dd[0])+i:dn,
+                                      dn*di[1]+i:dn*(di[1]+dd[1])+i:dn,
+                                      dn*di[2]+i:dn*(di[2]+dd[2])+i:dn]
+                    yield assert_equal, f, g["Density"]


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/data_objects/tests/test_derived_quantities.py
--- /dev/null
+++ b/yt/data_objects/tests/test_derived_quantities.py
@@ -0,0 +1,24 @@
+from yt.testing import *
+import numpy as np
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_extrema():
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(16, nprocs = nprocs, fields = ("Density",
+                "x-velocity", "y-velocity", "z-velocity"))
+        sp = pf.h.sphere("c", (0.25, '1'))
+        (mi, ma), = sp.quantities["Extrema"]("Density")
+        yield assert_equal, mi, np.nanmin(sp["Density"])
+        yield assert_equal, ma, np.nanmax(sp["Density"])
+        dd = pf.h.all_data()
+        (mi, ma), = dd.quantities["Extrema"]("Density")
+        yield assert_equal, mi, np.nanmin(dd["Density"])
+        yield assert_equal, ma, np.nanmax(dd["Density"])
+        sp = pf.h.sphere("max", (0.25, '1'))
+        yield assert_equal, np.any(np.isnan(sp["RadialVelocity"])), True
+        (mi, ma), = dd.quantities["Extrema"]("RadialVelocity")
+        yield assert_equal, mi, np.nanmin(dd["RadialVelocity"])
+        yield assert_equal, ma, np.nanmax(dd["RadialVelocity"])
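
Why Extrema switched to np.nanmin/np.nanmax: fields such as RadialVelocity can
contain NaNs (e.g. where the radius goes to zero at the sphere's center), and
plain ndarray.min()/max() propagate them.

    import numpy as np

    a = np.array([1.0, np.nan, 3.0])
    lo, hi = np.nanmin(a), np.nanmax(a)     # 1.0, 3.0
    bad_lo, bad_hi = a.min(), a.max()       # both nan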


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/data_objects/tests/test_ellipsoid.py
--- /dev/null
+++ b/yt/data_objects/tests/test_ellipsoid.py
@@ -0,0 +1,35 @@
+from yt.testing import *
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","loglevel"] = "50"
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_ellipsoid():
+    # We decompose in different ways
+    cs = [np.array([0.5, 0.5, 0.5]),
+          np.array([0.1, 0.2, 0.3]),
+          np.array([0.8, 0.8, 0.8])]
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs = nprocs)
+        min_dx = 2.0/pf.domain_dimensions
+        ABC = np.random.random((3, 12)) * 0.1
+        e0s = np.random.random((3, 12))
+        tilts = np.random.random(12)
+        ABC[:,0] = 0.1
+        for i in range(12):
+            for c in cs:
+                A, B, C = reversed(sorted(ABC[:,i]))
+                A = max(A, min_dx[0])
+                B = max(B, min_dx[1])
+                C = max(C, min_dx[2])
+                e0 = e0s[:,i]
+                tilt = tilts[i]
+                ell = pf.h.ellipsoid(c, A, B, C, e0, tilt)
+                yield assert_equal, np.all(ell["Radius"] <= A), True
+                p = np.array([ell[ax] for ax in 'xyz'])
+                v  = np.zeros_like(ell["Radius"])
+                v += (((p - c[:,None]) * ell._e0[:,None]).sum(axis=0) / ell._A)**2
+                v += (((p - c[:,None]) * ell._e1[:,None]).sum(axis=0) / ell._B)**2
+                v += (((p - c[:,None]) * ell._e2[:,None]).sum(axis=0) / ell._C)**2
+                yield assert_equal, np.all(np.sqrt(v) <= 1.0), True
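
The membership condition this test checks, written out: a point p lies inside
the ellipsoid (center c, semi-axes A >= B >= C along the orthonormal frame
e0, e1, e2) iff ((p-c).e0/A)^2 + ((p-c).e1/B)^2 + ((p-c).e2/C)^2 <= 1.

    import numpy as np

    def inside_ellipsoid(p, c, A, B, C, e0, e1, e2):
        d = p - c
        v = (np.dot(d, e0) / A)**2 + (np.dot(d, e1) / B)**2 + (np.dot(d, e2) / C)**2
        return v <= 1.0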


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/data_objects/tests/test_extract_regions.py
--- /dev/null
+++ b/yt/data_objects/tests/test_extract_regions.py
@@ -0,0 +1,53 @@
+from yt.testing import *
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_cut_region():
+    # We decompose in different ways
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs = nprocs,
+            fields = ("Density", "Temperature", "x-velocity"))
+        # We'll test two objects
+        dd = pf.h.all_data()
+        r = dd.cut_region( [ "grid['Temperature'] > 0.5",
+                             "grid['Density'] < 0.75",
+                             "grid['x-velocity'] > 0.25" ])
+        t = ( (dd["Temperature"] > 0.5 ) 
+            & (dd["Density"] < 0.75 )
+            & (dd["x-velocity"] > 0.25 ) )
+        yield assert_equal, np.all(r["Temperature"] > 0.5), True
+        yield assert_equal, np.all(r["Density"] < 0.75), True
+        yield assert_equal, np.all(r["x-velocity"] > 0.25), True
+        yield assert_equal, np.sort(dd["Density"][t]), np.sort(r["Density"])
+        yield assert_equal, np.sort(dd["x"][t]), np.sort(r["x"])
+        r2 = r.cut_region( [ "grid['Temperature'] < 0.75" ] )
+        t2 = (r["Temperature"] < 0.75)
+        yield assert_equal, np.sort(r2["Temperature"]), np.sort(r["Temperature"][t2])
+        yield assert_equal, np.all(r2["Temperature"] < 0.75), True
+
+def test_extract_region():
+    # We decompose in different ways
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs = nprocs,
+            fields = ("Density", "Temperature", "x-velocity"))
+        # We'll test two objects
+        dd = pf.h.all_data()
+        t = ( (dd["Temperature"] > 0.5 ) 
+            & (dd["Density"] < 0.75 )
+            & (dd["x-velocity"] > 0.25 ) )
+        r = dd.extract_region(t)
+        yield assert_equal, np.all(r["Temperature"] > 0.5), True
+        yield assert_equal, np.all(r["Density"] < 0.75), True
+        yield assert_equal, np.all(r["x-velocity"] > 0.25), True
+        yield assert_equal, np.sort(dd["Density"][t]), np.sort(r["Density"])
+        yield assert_equal, np.sort(dd["x"][t]), np.sort(r["x"])
+        t2 = (r["Temperature"] < 0.75)
+        r2 = r.cut_region( [ "grid['Temperature'] < 0.75" ] )
+        yield assert_equal, np.sort(r2["Temperature"]), np.sort(r["Temperature"][t2])
+        yield assert_equal, np.all(r2["Temperature"] < 0.75), True
+        t3 = (r["Temperature"] < 0.75)
+        r3 = r.extract_region( t3 )
+        yield assert_equal, np.sort(r3["Temperature"]), np.sort(r["Temperature"][t3])
+        yield assert_equal, np.all(r3["Temperature"] < 0.75), True
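
cut_region evaluates string expressions against each grid (bound to the name 'grid'), while extract_region takes a precomputed boolean mask over the source; the tests above check that the two routes agree. A minimal sketch using the stream-backed fake dataset:

    import numpy as np
    from yt.testing import fake_random_pf

    pf = fake_random_pf(16, fields=("Density", "Temperature"))
    dd = pf.h.all_data()
    r1 = dd.cut_region(["grid['Temperature'] > 0.5"])   # string cuts
    r2 = dd.extract_region(dd["Temperature"] > 0.5)     # boolean mask
    assert np.all(r1["Temperature"] > 0.5)
    assert np.all(np.sort(r1["Density"]) == np.sort(r2["Density"]))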


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/data_objects/tests/test_fields.py
--- /dev/null
+++ b/yt/data_objects/tests/test_fields.py
@@ -0,0 +1,91 @@
+from yt.testing import *
+import numpy as np
+from yt.data_objects.field_info_container import \
+    FieldInfo
+import yt.data_objects.universal_fields
+from yt.utilities.definitions import \
+    mpc_conversion, sec_conversion
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+    np.seterr(all = 'ignore')
+
+_sample_parameters = dict(
+    axis = 0,
+    center = np.array((0.0, 0.0, 0.0)),
+    bulk_velocity = np.array((0.0, 0.0, 0.0)),
+    normal = np.array((0.0, 0.0, 1.0)),
+    cp_x_vec = np.array((1.0, 0.0, 0.0)),
+    cp_y_vec = np.array((0.0, 1.0, 0.0)),
+    cp_z_vec = np.array((0.0, 0.0, 1.0)),
+)
+
+_base_fields = ["Density", "x-velocity", "y-velocity", "z-velocity"]
+
+def realistic_pf(fields, nprocs):
+    pf = fake_random_pf(16, fields = fields, nprocs = nprocs)
+    pf.parameters["HydroMethod"] = "streaming"
+    pf.parameters["Gamma"] = 5.0/3.0
+    pf.parameters["EOSType"] = 1.0
+    pf.parameters["EOSSoundSpeed"] = 1.0
+    pf.conversion_factors["Time"] = 1.0
+    pf.conversion_factors.update( dict((f, 1.0) for f in fields) )
+    pf.current_redshift = 0.0001
+    pf.hubble_constant = 0.7
+    for unit in mpc_conversion:
+        pf.units[unit+'h'] = pf.units[unit]
+        pf.units[unit+'cm'] = pf.units[unit]
+        pf.units[unit+'hcm'] = pf.units[unit]
+    return pf
+
+class TestFieldAccess(object):
+    description = None
+
+    def __init__(self, field_name, nproc):
+        # Note this should be a field name
+        self.field_name = field_name
+        self.description = "Accessing_%s_%s" % (field_name, nproc)
+        self.nproc = nproc
+
+    def __call__(self):
+        field = FieldInfo[self.field_name]
+        deps = field.get_dependencies()
+        fields = deps.requested + _base_fields
+        skip_grids = False
+        needs_spatial = False
+        for v in field.validators:
+            f = getattr(v, "fields", None)
+            if f: fields += f
+            if getattr(v, "ghost_zones", 0) > 0:
+                skip_grids = True
+            if hasattr(v, "ghost_zones"):
+                needs_spatial = True
+        pf = realistic_pf(fields, self.nproc)
+        # This gives unequal sized grids as well as subgrids
+        dd1 = pf.h.all_data()
+        dd2 = pf.h.all_data()
+        dd1.field_parameters.update(_sample_parameters)
+        dd2.field_parameters.update(_sample_parameters)
+        v1 = dd1[self.field_name]
+        conv = field._convert_function(dd1) or 1.0
+        if not needs_spatial:
+            assert_equal(v1, conv*field._function(field, dd2))
+        if not skip_grids:
+            for g in pf.h.grids:
+                g.field_parameters.update(_sample_parameters)
+                conv = field._convert_function(g) or 1.0
+                v1 = g[self.field_name]
+                g.clear_data()
+                g.field_parameters.update(_sample_parameters)
+                assert_equal(v1, conv*field._function(field, g))
+
+def test_all_fields():
+    for field in FieldInfo:
+        if field.startswith("CuttingPlane"): continue
+        if field.startswith("particle"): continue
+        if field.startswith("CIC"): continue
+        if field.startswith("WeakLensingConvergence"): continue
+        if FieldInfo[field].particle_type: continue
+        for nproc in [1, 4, 8]:
+            yield TestFieldAccess(field, nproc)
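
Each FieldInfo entry carries the metadata this harness relies on: get_dependencies() reports which fields a derived field reads, and the validators describe what kind of data object can supply it. A small inspection sketch, assuming the module-level registry is populated by importing universal_fields:

    from yt.data_objects.field_info_container import FieldInfo
    import yt.data_objects.universal_fields  # populates the registry

    finfo = FieldInfo["VelocityMagnitude"]
    deps = finfo.get_dependencies()
    requested = deps.requested                       # fields it reads
    validators = [type(v).__name__ for v in finfo.validators]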


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/data_objects/tests/test_ortho_rays.py
--- /dev/null
+++ b/yt/data_objects/tests/test_ortho_rays.py
@@ -0,0 +1,25 @@
+from yt.testing import *
+
+def test_ortho_ray():
+    pf = fake_random_pf(64, nprocs=8)
+    dx = (pf.domain_right_edge - pf.domain_left_edge) / \
+          pf.domain_dimensions
+
+    axes = ['x', 'y', 'z']
+    for ax, an in enumerate(axes):
+        ocoord = np.random.random(2)
+
+        my_oray = pf.h.ortho_ray(ax, ocoord)
+
+        my_axes = range(3)
+        del my_axes[ax]
+
+        # find the cells intersected by the ortho ray
+        my_all = pf.h.all_data()
+        my_cells = (np.abs(my_all[axes[my_axes[0]]] - ocoord[0]) <= 
+                    0.5 * dx[my_axes[0]]) & \
+                   (np.abs(my_all[axes[my_axes[1]]] - ocoord[1]) <= 
+                    0.5 * dx[my_axes[1]])
+
+        yield assert_equal, my_oray['Density'].sum(), \
+                            my_all['Density'][my_cells].sum()
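
An ortho ray fixes two coordinates and sweeps the remaining axis, so it should contain exactly the cells whose centers lie within half a cell width of those coordinates, as the test asserts. A minimal sketch for an x-axis ray on the unit domain:

    import numpy as np
    from yt.testing import fake_random_pf

    pf = fake_random_pf(32)
    dx = (pf.domain_right_edge - pf.domain_left_edge) / pf.domain_dimensions
    oray = pf.h.ortho_ray(0, (0.375, 0.625))   # axis 0 == x; (y, z) fixed
    ad = pf.h.all_data()
    cells = (np.abs(ad["y"] - 0.375) <= 0.5 * dx[1]) & \
            (np.abs(ad["z"] - 0.625) <= 0.5 * dx[2])
    assert np.allclose(oray["Density"].sum(), ad["Density"][cells].sum())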


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/data_objects/tests/test_profiles.py
--- /dev/null
+++ b/yt/data_objects/tests/test_profiles.py
@@ -0,0 +1,74 @@
+from yt.testing import *
+from yt.data_objects.profiles import \
+    BinnedProfile1D, BinnedProfile2D, BinnedProfile3D
+
+_fields = ("Density", "Temperature", "Dinosaurs", "Tribbles")
+
+def test_profiles():
+    pf = fake_random_pf(64, nprocs = 8, fields = _fields)
+    nv = pf.domain_dimensions.prod()
+    dd = pf.h.all_data()
+    (rmi, rma), (tmi, tma), (dmi, dma) = dd.quantities["Extrema"](
+        ["Density", "Temperature", "Dinosaurs"])
+    rt, tt, dt = dd.quantities["TotalQuantity"](
+        ["Density", "Temperature", "Dinosaurs"])
+    # First we exercise the binned profiles across bin counts, binning modes,
+    # and end-collection options.
+    for nb in [8, 16, 32, 64]:
+        for lr in [True, False]:
+            # We log all the fields or don't log 'em all.  No need to do them
+            # individually.
+            for lf in [True, False]: 
+                # We have the min and the max, but to avoid cutting them off
+                # since we aren't doing end-collect, we cut a bit off the edges
+                for ec, e1, e2 in [(False, 0.9, 1.1), (True, 1.0, 1.0)]:
+                    p1d = BinnedProfile1D(dd, 
+                        nb, "Density", rmi*e1, rma*e2, lf,
+                        lr, end_collect=ec)
+                    p1d.add_fields(["Ones", "Temperature"], weight=None)
+                    yield assert_equal, p1d["Ones"].sum(), nv
+                    yield assert_rel_equal, tt, p1d["Temperature"].sum(), 7
+
+                    p2d = BinnedProfile2D(dd, 
+                        nb, "Density", rmi*e1, rma*e2, lf,
+                        nb, "Temperature", tmi*e1, tma*e2, lf,
+                        lr, end_collect=ec)
+                    p2d.add_fields(["Ones", "Temperature"], weight=None)
+                    yield assert_equal, p2d["Ones"].sum(), nv
+                    yield assert_rel_equal, tt, p2d["Temperature"].sum(), 7
+
+                    p3d = BinnedProfile3D(dd, 
+                        nb, "Density", rmi*e1, rma*e2, lf,
+                        nb, "Temperature", tmi*e1, tma*e2, lf,
+                        nb, "Dinosaurs", dmi*e1, dma*e2, lf,
+                        lr, end_collect=ec)
+                    p3d.add_fields(["Ones", "Temperature"], weight=None)
+                    yield assert_equal, p3d["Ones"].sum(), nv
+                    yield assert_rel_equal, tt, p3d["Temperature"].sum(), 7
+
+            p1d = BinnedProfile1D(dd, nb, "x", 0.0, 1.0, log_space=False)
+            p1d.add_fields("Ones", weight=None)
+            av = nv / nb
+            yield assert_equal, p1d["Ones"][:-1], np.ones(nb)*av
+            # We re-bin ones with a weight now
+            p1d.add_fields(["Ones"], weight="Temperature")
+            yield assert_equal, p1d["Ones"][:-1], np.ones(nb)
+
+            p2d = BinnedProfile2D(dd, nb, "x", 0.0, 1.0, False,
+                                      nb, "y", 0.0, 1.0, False)
+            p2d.add_fields("Ones", weight=None)
+            av = nv / nb**2
+            yield assert_equal, p2d["Ones"][:-1,:-1], np.ones((nb, nb))*av
+            # We re-bin ones with a weight now
+            p2d.add_fields(["Ones"], weight="Temperature")
+            yield assert_equal, p2d["Ones"][:-1,:-1], np.ones((nb, nb))
+
+            p3d = BinnedProfile3D(dd, nb, "x", 0.0, 1.0, False,
+                                      nb, "y", 0.0, 1.0, False,
+                                      nb, "z", 0.0, 1.0, False)
+            p3d.add_fields("Ones", weight=None)
+            av = nv / nb**3
+            yield assert_equal, p3d["Ones"][:-1,:-1,:-1], np.ones((nb, nb, nb))*av
+            # We re-bin ones with a weight now
+            p3d.add_fields(["Ones"], weight="Temperature")
+            yield assert_equal, p3d["Ones"][:-1,:-1,:-1], np.ones((nb,nb,nb))
+
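
With weight=None a profile accumulates per-bin sums; passing a weight field yields weighted averages instead, and the trailing bin is an overflow bin, which is why the assertions slice with [:-1]. A minimal sketch (weighting by the stock CellMass field is an assumption here):

    from yt.testing import fake_random_pf
    from yt.data_objects.profiles import BinnedProfile1D

    pf = fake_random_pf(32)
    dd = pf.h.all_data()
    prof = BinnedProfile1D(dd, 16, "x", 0.0, 1.0, log_space=False)
    prof.add_fields(["Ones"], weight=None)           # raw cell counts
    prof.add_fields(["Density"], weight="CellMass")  # mass-weighted mean
    counts = prof["Ones"][:-1]
    mean_rho = prof["Density"][:-1]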


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/data_objects/tests/test_projection.py
--- /dev/null
+++ b/yt/data_objects/tests/test_projection.py
@@ -0,0 +1,59 @@
+from yt.testing import *
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_projection():
+    for nprocs in [8, 1]:
+        # We want to test both 1 proc and 8 procs, to make sure that
+        # parallelism isn't broken
+        pf = fake_random_pf(64, nprocs = nprocs)
+        dims = pf.domain_dimensions
+        xn, yn, zn = pf.domain_dimensions
+        xi, yi, zi = pf.domain_left_edge + 1.0/(pf.domain_dimensions * 2)
+        xf, yf, zf = pf.domain_right_edge - 1.0/(pf.domain_dimensions * 2)
+        dd = pf.h.all_data()
+        rho_tot = dd.quantities["TotalQuantity"]("Density")[0]
+        coords = np.mgrid[xi:xf:xn*1j, yi:yf:yn*1j, zi:zf:zn*1j]
+        uc = [np.unique(c) for c in coords]
+        # Some simple projection tests with single grids
+        for ax, an in enumerate("xyz"):
+            xax = x_dict[ax]
+            yax = y_dict[ax]
+            for wf in ["Density", None]:
+                proj = pf.h.proj(ax, ["Ones", "Density"], weight_field = wf)
+                yield assert_equal, proj["Ones"].sum(), proj["Ones"].size
+                yield assert_equal, proj["Ones"].min(), 1.0
+                yield assert_equal, proj["Ones"].max(), 1.0
+                yield assert_equal, np.unique(proj["px"]), uc[xax]
+                yield assert_equal, np.unique(proj["py"]), uc[yax]
+                yield assert_equal, np.unique(proj["pdx"]), 1.0/(dims[xax]*2.0)
+                yield assert_equal, np.unique(proj["pdy"]), 1.0/(dims[yax]*2.0)
+                frb = proj.to_frb((1.0,'unitary'), 64)
+                for proj_field in ['Ones', 'Density']:
+                    yield assert_equal, frb[proj_field].info['data_source'], \
+                            proj.__str__()
+                    yield assert_equal, frb[proj_field].info['axis'], \
+                            ax
+                    yield assert_equal, frb[proj_field].info['field'], \
+                            proj_field
+                    yield assert_equal, frb[proj_field].info['units'], \
+                            pf.field_info[proj_field].get_units()
+                    yield assert_equal, frb[proj_field].info['xlim'], \
+                            frb.bounds[:2]
+                    yield assert_equal, frb[proj_field].info['ylim'], \
+                            frb.bounds[2:]
+                    yield assert_equal, frb[proj_field].info['length_to_cm'], \
+                            pf['cm']
+                    yield assert_equal, frb[proj_field].info['center'], \
+                            proj.center
+                    yield assert_equal, frb[proj_field].info['weight_field'], \
+                            wf
+            # The wf loop ends with wf == None, so proj here is unweighted
+            yield assert_equal, wf, None
+            v1 = proj["Density"].sum()
+            v2 = (dd["Density"] * dd["d%s" % an]).sum()
+            yield assert_rel_equal, v1, v2, 10
+
+
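
For an unweighted projection each pixel is the line integral of the field, so summing the projection reproduces the direct cell-by-cell sum of Density times the path element, which is what the final assertion checks. A standalone sketch along z:

    from yt.testing import fake_random_pf, assert_rel_equal

    pf = fake_random_pf(32)
    dd = pf.h.all_data()
    proj = pf.h.proj(2, "Density")   # unweighted projection, axis 2 == z
    assert_rel_equal(proj["Density"].sum(),
                     (dd["Density"] * dd["dz"]).sum(), 10)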


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/data_objects/tests/test_rays.py
--- /dev/null
+++ b/yt/data_objects/tests/test_rays.py
@@ -0,0 +1,33 @@
+from yt.testing import *
+
+def test_ray():
+    for nproc in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=nproc)
+        dx = (pf.domain_right_edge - pf.domain_left_edge) / \
+          pf.domain_dimensions
+
+        p1 = np.random.random(3)
+        p2 = np.random.random(3)
+
+        my_ray = pf.h.ray(p1, p2)
+        assert_rel_equal(my_ray['dts'].sum(), 1.0, 14)
+        ray_cells = my_ray['dts'] > 0
+
+        # find cells intersected by the ray
+        my_all = pf.h.all_data()
+        
+        dt = np.abs(dx / (p2 - p1))
+        tin  = np.concatenate([[(my_all['x'] - p1[0]) / (p2 - p1)[0] - 0.5 * dt[0]],
+                               [(my_all['y'] - p1[1]) / (p2 - p1)[1] - 0.5 * dt[1]],
+                               [(my_all['z'] - p1[2]) / (p2 - p1)[2] - 0.5 * dt[2]]])
+        tout = np.concatenate([[(my_all['x'] - p1[0]) / (p2 - p1)[0] + 0.5 * dt[0]],
+                               [(my_all['y'] - p1[1]) / (p2 - p1)[1] + 0.5 * dt[1]],
+                               [(my_all['z'] - p1[2]) / (p2 - p1)[2] + 0.5 * dt[2]]])
+        tin = tin.max(axis=0)
+        tout = tout.min(axis=0)
+        my_cells = (tin < tout) & (tin < 1) & (tout > 0)
+
+        yield assert_rel_equal, ray_cells.sum(), my_cells.sum(), 14
+        yield assert_rel_equal, my_ray['Density'][ray_cells].sum(), \
+                                my_all['Density'][my_cells].sum(), 14
+        yield assert_rel_equal, my_ray['dts'].sum(), 1.0, 14
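
The 'dts' field stores the fraction of the ray's total path length crossed in each cell (so it sums to one), and 't' is the normalized position along the ray; the tin/tout construction above is the usual slab test for cell intersection. A minimal sketch:

    import numpy as np
    from yt.testing import fake_random_pf, assert_rel_equal

    pf = fake_random_pf(32)
    ray = pf.h.ray(np.array([0.1, 0.2, 0.3]), np.array([0.9, 0.8, 0.7]))
    assert_rel_equal(ray["dts"].sum(), 1.0, 14)
    assert ray["t"].max() <= 1.0 + 1e-10   # normalized path positions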


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/data_objects/tests/test_slice.py
--- /dev/null
+++ b/yt/data_objects/tests/test_slice.py
@@ -0,0 +1,55 @@
+from yt.testing import *
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_slice():
+    for nprocs in [8, 1]:
+        # We want to test both 1 proc and 8 procs, to make sure that
+        # parallelism isn't broken
+        pf = fake_random_pf(64, nprocs = nprocs)
+        dims = pf.domain_dimensions
+        xn, yn, zn = pf.domain_dimensions
+        xi, yi, zi = pf.domain_left_edge + 1.0/(pf.domain_dimensions * 2)
+        xf, yf, zf = pf.domain_right_edge - 1.0/(pf.domain_dimensions * 2)
+        coords = np.mgrid[xi:xf:xn*1j, yi:yf:yn*1j, zi:zf:zn*1j]
+        uc = [np.unique(c) for c in coords]
+        slc_pos = 0.5
+        # Some simple slice tests with single grids
+        for ax, an in enumerate("xyz"):
+            xax = x_dict[ax]
+            yax = y_dict[ax]
+            for wf in ["Density", None]:
+                slc = pf.h.slice(ax, slc_pos, ["Ones", "Density"])
+                yield assert_equal, slc["Ones"].sum(), slc["Ones"].size
+                yield assert_equal, slc["Ones"].min(), 1.0
+                yield assert_equal, slc["Ones"].max(), 1.0
+                yield assert_equal, np.unique(slc["px"]), uc[xax]
+                yield assert_equal, np.unique(slc["py"]), uc[yax]
+                yield assert_equal, np.unique(slc["pdx"]), 1.0/(dims[xax]*2.0)
+                yield assert_equal, np.unique(slc["pdy"]), 1.0/(dims[yax]*2.0)
+                frb = slc.to_frb((1.0,'unitary'), 64)
+                for slc_field in ['Ones', 'Density']:
+                    yield assert_equal, frb[slc_field].info['data_source'], \
+                            slc.__str__()
+                    yield assert_equal, frb[slc_field].info['axis'], \
+                            ax
+                    yield assert_equal, frb[slc_field].info['field'], \
+                            slc_field
+                    yield assert_equal, frb[slc_field].info['units'], \
+                            pf.field_info[slc_field].get_units()
+                    yield assert_equal, frb[slc_field].info['xlim'], \
+                            frb.bounds[:2]
+                    yield assert_equal, frb[slc_field].info['ylim'], \
+                            frb.bounds[2:]
+                    yield assert_equal, frb[slc_field].info['length_to_cm'], \
+                            pf['cm']
+                    yield assert_equal, frb[slc_field].info['center'], \
+                            slc.center
+                    yield assert_equal, frb[slc_field].info['coord'], \
+                            slc_pos
+            # Sanity check: the wf loop above ends with wf == None
+            yield assert_equal, wf, None
+
+
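
to_frb resamples a slice (or projection) onto a fixed-resolution buffer, and every extracted image carries its provenance in an .info dict, which is what the loop above verifies field by field. A minimal sketch:

    from yt.testing import fake_random_pf

    pf = fake_random_pf(32)
    slc = pf.h.slice(2, 0.5, "Density")      # the z = 0.5 plane
    frb = slc.to_frb((1.0, 'unitary'), 64)   # 64x64 pixels, full width
    img = frb["Density"]
    assert img.shape == (64, 64)
    assert img.info['axis'] == 2 and img.info['coord'] == 0.5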


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/data_objects/tests/test_streamlines.py
--- /dev/null
+++ b/yt/data_objects/tests/test_streamlines.py
@@ -0,0 +1,22 @@
+from yt.testing import *
+from yt.visualization.api import Streamlines
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+_fields = ("Density", "x-velocity", "y-velocity", "z-velocity")
+
+def test_streamlines():
+    # We decompose in different ways
+    cs = np.mgrid[0.47:0.53:2j,0.47:0.53:2j,0.47:0.53:2j]
+    cs = np.array([a.ravel() for a in cs]).T
+    length = (1.0/128) * 16 # 16 half-widths of a cell
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs = nprocs, fields = _fields)
+        streams = Streamlines(pf, cs, length=length)
+        streams.integrate_through_volume()
+        for path in (streams.path(i) for i in range(8)):
+            yield assert_rel_equal, path['dts'].sum(), 1.0, 14
+            yield assert_equal, np.all(path['t'] <= (1.0 + 1e-10)), True
+            path["Density"]


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -258,8 +258,11 @@
 
         """
         if isinstance(filenames, types.StringTypes):
+            pattern = filenames
             filenames = glob.glob(filenames)
             filenames.sort()
+            if len(filenames) == 0:
+                raise YTNoFilenamesMatchPattern(pattern)
         obj = cls(filenames[:], parallel = parallel)
         return obj
 
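With this change an empty glob fails fast with YTNoFilenamesMatchPattern instead of an IndexError on the next line. A hedged sketch of the calling pattern (the Enzo-style glob below is hypothetical):

    from yt.mods import TimeSeriesData

    # The pattern is expanded and sorted; if nothing matches,
    # YTNoFilenamesMatchPattern is raised immediately.
    ts = TimeSeriesData.from_filenames("DD????/DD????")
    for pf in ts:
        pf.h   # instantiate each output's hierarchy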


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -32,7 +32,7 @@
 
 from yt.funcs import *
 
-from yt.utilities.lib import CICDeposit_3, obtain_rvec
+from yt.utilities.lib import CICDeposit_3, obtain_rvec, obtain_rv_vec
 from yt.utilities.cosmology import Cosmology
 from field_info_container import \
     add_field, \
@@ -54,7 +54,19 @@
      kboltz, \
      G, \
      rho_crit_now, \
-     speed_of_light_cgs
+     speed_of_light_cgs, \
+     km_per_cm
+
+from yt.utilities.math_utils import \
+    get_sph_r_component, \
+    get_sph_theta_component, \
+    get_sph_phi_component, \
+    get_cyl_r_component, \
+    get_cyl_z_component, \
+    get_cyl_theta_component, \
+    get_cyl_r, get_cyl_theta, \
+    get_cyl_z, get_sph_r, \
+    get_sph_theta, get_sph_phi
      
 # Note that, despite my newfound efforts to comply with PEP-8,
 # I violate it here in order to keep the name/func_name relationship
@@ -179,12 +191,8 @@
 
 def _VelocityMagnitude(field, data):
     """M{|v|}"""
-    bulk_velocity = data.get_field_parameter("bulk_velocity")
-    if bulk_velocity == None:
-        bulk_velocity = np.zeros(3)
-    return ( (data["x-velocity"]-bulk_velocity[0])**2.0 + \
-             (data["y-velocity"]-bulk_velocity[1])**2.0 + \
-             (data["z-velocity"]-bulk_velocity[2])**2.0 )**(1.0/2.0)
+    velocities = obtain_rv_vec(data)
+    return np.sqrt(np.sum(velocities**2,axis=0))
 add_field("VelocityMagnitude", function=_VelocityMagnitude,
           take_log=False, units=r"\rm{cm}/\rm{s}")
 
@@ -194,13 +202,6 @@
           function=_TangentialOverVelocityMagnitude,
           take_log=False)
 
-def _TangentialVelocity(field, data):
-    return np.sqrt(data["VelocityMagnitude"]**2.0
-                 - data["RadialVelocity"]**2.0)
-add_field("TangentialVelocity", 
-          function=_TangentialVelocity,
-          take_log=False, units=r"\rm{cm}/\rm{s}")
-
 def _Pressure(field, data):
     """M{(Gamma-1.0)*rho*E}"""
     return (data.pf["Gamma"] - 1.0) * \
@@ -223,14 +224,9 @@
 def _sph_r(field, data):
     center = data.get_field_parameter("center")
       
-    coords = np.array([data['x'] - center[0],
-                       data['y'] - center[1],
-                       data['z'] - center[2]]).transpose()
+    coords = obtain_rvec(data)
 
-    ## The spherical coordinates radius is simply the magnitude of the
-    ## coords vector.
-
-    return np.sqrt(np.sum(coords**2,axis=-1))
+    return get_sph_r(coords)
 
 def _Convert_sph_r_CGS(data):
    return data.convert("cm")
@@ -245,20 +241,9 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
     
-    coords = np.array([data['x'] - center[0],
-                       data['y'] - center[1],
-                       data['z'] - center[2]]).transpose()
+    coords = obtain_rvec(data)
 
-    ## The angle (theta) with respect to the normal (J), is the arccos
-    ## of the dot product of the normal with the normalized coords
-    ## vector.
-    
-    tile_shape = list(coords.shape)[:-1] + [1]
-    J = np.tile(normal,tile_shape)
-
-    JdotCoords = np.sum(J*coords,axis=-1)
-    
-    return np.arccos( JdotCoords / np.sqrt(np.sum(coords**2,axis=-1)) )
+    return get_sph_theta(coords, normal)
 
 add_field("sph_theta", function=_sph_theta,
          validators=[ValidateParameter("center"),ValidateParameter("normal")])
@@ -269,54 +254,21 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
     
-    coords = np.array([data['x'] - center[0],
-                       data['y'] - center[1],
-                       data['z'] - center[2]]).transpose()
-    
-    ## We have freedom with respect to what axis (xprime) to define
-    ## the disk angle. Here I've chosen to use the axis that is
-    ## perpendicular to the normal and the y-axis. When normal ==
-    ## y-hat, then set xprime = z-hat. With this definition, when
-    ## normal == z-hat (as is typical), then xprime == x-hat.
-    ##
-    ## The angle is then given by the arctan of the ratio of the
-    ## yprime-component and the xprime-component of the coords vector.
+    coords = obtain_rvec(data)
 
-    xprime = np.cross([0.0,1.0,0.0],normal)
-    if np.sum(xprime) == 0: xprime = np.array([0.0, 0.0, 1.0])
-    yprime = np.cross(normal,xprime)
-    
-    tile_shape = list(coords.shape)[:-1] + [1]
-    Jx = np.tile(xprime,tile_shape)
-    Jy = np.tile(yprime,tile_shape)
-    
-    Px = np.sum(Jx*coords,axis=-1)
-    Py = np.sum(Jy*coords,axis=-1)
-    
-    return np.arctan2(Py,Px)
+    return get_sph_phi(coords, normal)
 
 add_field("sph_phi", function=_sph_phi,
          validators=[ValidateParameter("center"),ValidateParameter("normal")])
 
-
-
 ### cylindrical coordinates: R (radius in the cylinder's plane)
 def _cyl_R(field, data):
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
       
-    coords = np.array([data['x'] - center[0],
-                       data['y'] - center[1],
-                       data['z'] - center[2]]).transpose()
+    coords = obtain_rvec(data)
 
-    ## The cross product of the normal (J) with the coords vector
-    ## gives a vector of magnitude equal to the cylindrical radius.
-    
-    tile_shape = list(coords.shape)[:-1] + [1]
-    J = np.tile(normal,tile_shape)
-
-    JcrossCoords = np.cross(J,coords)
-    return np.sqrt(np.sum(JcrossCoords**2,axis=-1))
+    return get_cyl_r(coords, normal)
 
 def _Convert_cyl_R_CGS(data):
    return data.convert("cm")
@@ -324,6 +276,9 @@
 add_field("cyl_R", function=_cyl_R,
          validators=[ValidateParameter("center"),ValidateParameter("normal")],
          convert_function = _Convert_cyl_R_CGS, units=r"\rm{cm}")
+add_field("cyl_RCode", function=_cyl_R,
+          validators=[ValidateParameter("center"),ValidateParameter("normal")],
+          units=r"Radius (code)")
 
 
 ### cylindrical coordinates: z (height above the cylinder's plane)
@@ -331,17 +286,9 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
     
-    coords = np.array([data['x'] - center[0],
-                       data['y'] - center[1],
-                       data['z'] - center[2]]).transpose()
+    coords = obtain_rvec(data)
 
-    ## The dot product of the normal (J) with the coords vector gives
-    ## the cylindrical height.
-    
-    tile_shape = list(coords.shape)[:-1] + [1]
-    J = np.tile(normal,tile_shape)
-
-    return np.sum(J*coords,axis=-1)  
+    return get_cyl_z(coords, normal)
 
 def _Convert_cyl_z_CGS(data):
    return data.convert("cm")
@@ -352,14 +299,17 @@
 
 
 ### cylindrical coordinates: theta (angle in the cylinder's plane)
-### [This is identical to the spherical coordinate's 'phi' angle.]
 def _cyl_theta(field, data):
-    return data['sph_phi']
+    center = data.get_field_parameter("center")
+    normal = data.get_field_parameter("normal")
+    
+    coords = obtain_rvec(data)
+
+    return get_cyl_theta(coords, normal)
 
 add_field("cyl_theta", function=_cyl_theta,
          validators=[ValidateParameter("center"),ValidateParameter("normal")])
 
-
 ### The old field DiskAngle is the same as the spherical coordinates'
 ### 'theta' angle. I'm keeping DiskAngle for backwards compatibility.
 def _DiskAngle(field, data):
@@ -392,6 +342,54 @@
                       ValidateParameter("normal")],
           units=r"AU", display_field=False)
 
+def _cyl_RadialVelocity(field, data):
+    normal = data.get_field_parameter("normal")
+    velocities = obtain_rv_vec(data)
+
+    theta = data['cyl_theta']
+
+    return get_cyl_r_component(velocities, theta, normal)
+
+def _cyl_RadialVelocityABS(field, data):
+    return np.abs(_cyl_RadialVelocity(field, data))
+def _Convert_cyl_RadialVelocityKMS(data):
+    return km_per_cm
+add_field("cyl_RadialVelocity", function=_cyl_RadialVelocity,
+          units=r"\rm{cm}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_RadialVelocityABS", function=_cyl_RadialVelocityABS,
+          units=r"\rm{cm}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_RadialVelocityKMS", function=_cyl_RadialVelocity,
+          convert_function=_Convert_cyl_RadialVelocityKMS, units=r"\rm{km}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_RadialVelocityKMSABS", function=_cyl_RadialVelocityABS,
+          convert_function=_Convert_cyl_RadialVelocityKMS, units=r"\rm{km}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+
+def _cyl_TangentialVelocity(field, data):
+    normal = data.get_field_parameter("normal")
+    velocities = obtain_rv_vec(data)
+    theta = data['cyl_theta']
+
+    return get_cyl_theta_component(velocities, theta, normal)
+
+def _cyl_TangentialVelocityABS(field, data):
+    return np.abs(_cyl_TangentialVelocity(field, data))
+def _Convert_cyl_TangentialVelocityKMS(data):
+    return km_per_cm
+add_field("cyl_TangentialVelocity", function=_cyl_TangentialVelocity,
+          units=r"\rm{cm}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_TangentialVelocityABS", function=_cyl_TangentialVelocityABS,
+          units=r"\rm{cm}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_TangentialVelocityKMS", function=_cyl_TangentialVelocity,
+          convert_function=_Convert_cyl_TangentialVelocityKMS, units=r"\rm{km}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_TangentialVelocityKMSABS", function=_cyl_TangentialVelocityABS,
+          convert_function=_Convert_cyl_TangentialVelocityKMS, units=r"\rm{km}/\rm{s}",
+          validators=[ValidateParameter("normal")])
 
 def _DynamicalTime(field, data):
     """
@@ -450,7 +448,7 @@
 
 # This is rho_total / rho_cr(z).
 def _Convert_Overdensity(data):
-    return 1 / (rho_crit_now * data.pf.hubble_constant**2 * 
+    return 1.0 / (rho_crit_now * data.pf.hubble_constant**2 * 
                 (1+data.pf.current_redshift)**3)
 add_field("Overdensity",function=_Matter_Density,
           convert_function=_Convert_Overdensity, units=r"")
@@ -470,8 +468,8 @@
     else:
         omega_baryon_now = 0.0441
     return data['Density'] / (omega_baryon_now * rho_crit_now * 
-                              (data.pf['CosmologyHubbleConstantNow']**2) * 
-                              ((1+data.pf['CosmologyCurrentRedshift'])**3))
+                              (data.pf.hubble_constant**2) * 
+                              ((1+data.pf.current_redshift)**3))
 add_field("Baryon_Overdensity", function=_Baryon_Overdensity, 
           units=r"")
 
@@ -640,13 +638,7 @@
           take_log=False, display_field=False)
 
 def obtain_velocities(data):
-    if data.has_field_parameter("bulk_velocity"):
-        bv = data.get_field_parameter("bulk_velocity")
-    else: bv = np.zeros(3, dtype='float64')
-    xv = data["x-velocity"] - bv[0]
-    yv = data["y-velocity"] - bv[1]
-    zv = data["z-velocity"] - bv[2]
-    return xv, yv, zv
+    return obtain_rv_vec(data)
 
 def _convertSpecificAngularMomentum(data):
     return data.convert("cm")
@@ -711,7 +703,7 @@
 #          convert_function=_convertSpecificAngularMomentum, vector_field=True,
 #          units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter('center')])
 def _convertSpecificAngularMomentumKMSMPC(data):
-    return data.convert("mpc")/1e5
+    return km_per_cm*data.convert("mpc")
 #add_field("ParticleSpecificAngularMomentumKMSMPC",
 #          function=_ParticleSpecificAngularMomentum, particle_type=True,
 #          convert_function=_convertSpecificAngularMomentumKMSMPC, vector_field=True,
@@ -883,33 +875,32 @@
           display_name = "Radius (code)")
 
 def _RadialVelocity(field, data):
-    center = data.get_field_parameter("center")
-    bulk_velocity = data.get_field_parameter("bulk_velocity")
-    if bulk_velocity == None:
-        bulk_velocity = np.zeros(3)
-    new_field = ( (data['x']-center[0])*(data["x-velocity"]-bulk_velocity[0])
-                + (data['y']-center[1])*(data["y-velocity"]-bulk_velocity[1])
-                + (data['z']-center[2])*(data["z-velocity"]-bulk_velocity[2])
-                )/data["RadiusCode"]
-    if np.any(np.isnan(new_field)): # to fix center = point
-        new_field[np.isnan(new_field)] = 0.0
-    return new_field
+    normal = data.get_field_parameter("normal")
+    velocities = obtain_rv_vec(data)    
+    theta = data['sph_theta']
+    phi   = data['sph_phi']
+
+    return get_sph_r_component(velocities, theta, phi, normal)
+
 def _RadialVelocityABS(field, data):
     return np.abs(_RadialVelocity(field, data))
 def _ConvertRadialVelocityKMS(data):
-    return 1e-5
+    return km_per_cm
 add_field("RadialVelocity", function=_RadialVelocity,
-          units=r"\rm{cm}/\rm{s}",
-          validators=[ValidateParameter("center")])
+          units=r"\rm{cm}/\rm{s}")
 add_field("RadialVelocityABS", function=_RadialVelocityABS,
-          units=r"\rm{cm}/\rm{s}",
-          validators=[ValidateParameter("center")])
+          units=r"\rm{cm}/\rm{s}")
 add_field("RadialVelocityKMS", function=_RadialVelocity,
-          convert_function=_ConvertRadialVelocityKMS, units=r"\rm{km}/\rm{s}",
-          validators=[ValidateParameter("center")])
+          convert_function=_ConvertRadialVelocityKMS, units=r"\rm{km}/\rm{s}")
 add_field("RadialVelocityKMSABS", function=_RadialVelocityABS,
-          convert_function=_ConvertRadialVelocityKMS, units=r"\rm{km}/\rm{s}",
-          validators=[ValidateParameter("center")])
+          convert_function=_ConvertRadialVelocityKMS, units=r"\rm{km}/\rm{s}")
+
+def _TangentialVelocity(field, data):
+    return np.sqrt(data["VelocityMagnitude"]**2.0
+                 - data["RadialVelocity"]**2.0)
+add_field("TangentialVelocity", 
+          function=_TangentialVelocity,
+          take_log=False, units=r"\rm{cm}/\rm{s}")
 
 def _CuttingPlaneVelocityX(field, data):
     x_vec, y_vec, z_vec = [data.get_field_parameter("cp_%s_vec" % (ax))
@@ -1026,6 +1017,47 @@
           display_name=r"\rm{Magnetic}\/\rm{Energy}",
           units="\rm{ergs}\/\rm{cm}^{-3}")
 
+def _BPoloidal(field,data):
+    normal = data.get_field_parameter("normal")
+
+    Bfields = np.array([data['Bx'], data['By'], data['Bz']])
+
+    theta = data['sph_theta']
+    phi   = data['sph_phi']
+
+    return get_sph_theta_component(Bfields, theta, phi, normal)
+
+add_field("BPoloidal", function=_BPoloidal,
+          units=r"\rm{Gauss}",
+          validators=[ValidateParameter("normal")])
+
+def _BToroidal(field,data):
+    normal = data.get_field_parameter("normal")
+
+    Bfields = np.array([data['Bx'], data['By'], data['Bz']])
+
+    phi   = data['sph_phi']
+
+    return get_sph_phi_component(Bfields, phi, normal)
+
+add_field("BToroidal", function=_BToroidal,
+          units=r"\rm{Gauss}",
+          validators=[ValidateParameter("normal")])
+
+def _BRadial(field,data):
+    normal = data.get_field_parameter("normal")
+
+    Bfields = np.array([data['Bx'], data['By'], data['Bz']])
+
+    theta = data['sph_theta']
+    phi   = data['sph_phi']
+
+    return get_sph_r_component(Bfields, theta, phi, normal)
+
+add_field("BRadial", function=_BPoloidal,
+          units=r"\rm{Gauss}",
+          validators=[ValidateParameter("normal")])
+
 def _VorticitySquared(field, data):
     mylog.debug("Generating vorticity on %s", data)
     # We need to set up stencils
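
All of the new component fields resolve their geometry through the 'center' and 'normal' field parameters and decompose the bulk-corrected vectors returned by obtain_rv_vec. A minimal usage sketch (a sphere supplies 'center' itself; 'normal' is set by hand):

    import numpy as np
    from yt.testing import fake_random_pf

    fields = ("Density", "x-velocity", "y-velocity", "z-velocity")
    pf = fake_random_pf(32, fields=fields)
    sp = pf.h.sphere(np.array([0.5, 0.5, 0.5]), 0.25)
    sp.set_field_parameter("normal", np.array([0.0, 0.0, 1.0]))
    vr = sp["cyl_RadialVelocity"]       # cm/s, radial within the plane
    vt = sp["cyl_TangentialVelocity"]   # cm/s, around the normal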


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -478,7 +478,7 @@
         self.parameters['TopGridRank'] = 3
         self.parameters['DomainLeftEdge'] = np.zeros(self.parameters['TopGridRank'])
         self.parameters['DomainRightEdge'] = np.ones(self.parameters['TopGridRank'])
-        self.parameters['Refineby'] = 2 # technically not the enzo default
+        self.parameters['RefineBy'] = 2 # technically not the enzo default
         self.parameters['StopCycle'] = 100000
         self.parameters['dtDataDump'] = 0.
         self.parameters['CycleSkipDataDump'] = 0.


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/frontends/enzo/tests/test_outputs.py
--- /dev/null
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -0,0 +1,51 @@
+"""
+Enzo frontend tests using moving7
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.testing import *
+from yt.utilities.answer_testing.framework import \
+    requires_pf, \
+    small_patch_amr, \
+    big_patch_amr, \
+    data_dir_load
+from yt.frontends.enzo.api import EnzoStaticOutput
+
+_fields = ("Temperature", "Density", "VelocityMagnitude", "DivV",
+           "particle_density")
+
+m7 = "DD0010/moving7_0010"
+@requires_pf(m7)
+def test_moving7():
+    pf = data_dir_load(m7)
+    yield assert_equal, str(pf), "moving7_0010"
+    for test in small_patch_amr(m7, _fields):
+        yield test
+
+g30 = "IsolatedGalaxy/galaxy0030/galaxy0030"
+@requires_pf(g30, big_data=True)
+def test_galaxy0030():
+    pf = data_dir_load(g30)
+    yield assert_equal, str(pf), "galaxy0030"
+    for test in big_patch_amr(g30, _fields):
+        yield test
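
These answer tests resolve dataset paths against the test_data_dir configuration option and are skipped via requires_pf when the data is missing; the big-data variant additionally needs the --answer-big-data flag. A hedged sketch of loading the same dataset by hand (the directory is a hypothetical location):

    from yt.config import ytcfg
    ytcfg["yt", "test_data_dir"] = "/data/yt_test_data"  # hypothetical

    from yt.utilities.answer_testing.framework import data_dir_load
    pf = data_dir_load("DD0010/moving7_0010")  # also instantiates pf.h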


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -99,8 +99,8 @@
         add_field(fn1.split("_")[0] + "_Density",
                   function=_get_density(fn1), take_log=True,
                   display_name="%s\/Density" % fn1.split("_")[0],
-                  units = r"\rm{g}/\rm{cm}^3",
-                  projected_units = r"\rm{g}/\rm{cm}^2",
+                  units = r"\rm{g}/\rm{cm}^{3}",
+                  projected_units = r"\rm{g}/\rm{cm}^{2}",
                   )
 
 def _get_convert(fname):
@@ -110,8 +110,8 @@
 
 add_flash_field("dens", function=NullFunc, take_log=True,
                 convert_function=_get_convert("dens"),
-                units=r"\rm{g}/\rm{cm}^3",
-                projected_units = r"\rm{g}/\rm{cm}^2"),
+                units=r"\rm{g}/\rm{cm}^{3}",
+                projected_units = r"\rm{g}/\rm{cm}^{2}")
 add_flash_field("velx", function=NullFunc, take_log=False,
                 convert_function=_get_convert("velx"),
                 units=r"\rm{cm}/\rm{s}")
@@ -159,10 +159,10 @@
                 units = r"\rm{K}")
 add_flash_field("pres", function=NullFunc, take_log=True,
                 convert_function=_get_convert("pres"),
-                units=r"\rm{erg}\//\/\rm{cm}^{3}")
+                units=r"\rm{erg}/\rm{cm}^{3}")
 add_flash_field("pden", function=NullFunc, take_log=True,
                 convert_function=_get_convert("pden"),
-                units=r"\rm{g}/\rm{cm}^3")
+                units=r"\rm{g}/\rm{cm}^{3}")
 add_flash_field("magx", function=NullFunc, take_log=False,
                 convert_function=_get_convert("magx"),
                 units = r"\mathrm{Gau\ss}")
@@ -174,7 +174,7 @@
                 units = r"\mathrm{Gau\ss}")
 add_flash_field("magp", function=NullFunc, take_log=True,
                 convert_function=_get_convert("magp"),
-                units = r"\rm{erg}\//\/\rm{cm}^{3}")
+                units = r"\rm{erg}/\rm{cm}^{3}")
 add_flash_field("divb", function=NullFunc, take_log=False,
                 convert_function=_get_convert("divb"),
                 units = r"\mathrm{Gau\ss}\/\rm{cm}")
@@ -186,10 +186,10 @@
                 units=r"\rm{ratio\/of\/specific\/heats}")
 add_flash_field("gpot", function=NullFunc, take_log=False,
                 convert_function=_get_convert("gpot"),
-                units=r"\rm{ergs\//\/g}")
+                units=r"\rm{ergs}/\rm{g}")
 add_flash_field("gpol", function=NullFunc, take_log=False,
                 convert_function=_get_convert("gpol"),
-                units = r"\rm{ergs\//\/g}")
+                units = r"\rm{ergs}/\rm{g}")
 add_flash_field("flam", function=NullFunc, take_log=False,
                 convert_function=_get_convert("flam"))
 


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/frontends/stream/api.py
--- a/yt/frontends/stream/api.py
+++ b/yt/frontends/stream/api.py
@@ -29,7 +29,9 @@
       StreamHierarchy, \
       StreamStaticOutput, \
       StreamHandler, \
-      load_uniform_grid
+      load_uniform_grid, \
+      load_amr_grids, \
+      refine_amr
 
 from .fields import \
       KnownStreamFields, \


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -44,6 +44,8 @@
     decompose_array, get_psize
 from yt.utilities.definitions import \
     mpc_conversion, sec_conversion
+from yt.utilities.flagging_methods import \
+    FlaggingGrid
 
 from .fields import \
     StreamFieldInfo, \
@@ -353,7 +355,8 @@
             psize = get_psize(np.array(data[key].shape), nprocs)
             grid_left_edges, grid_right_edges, temp[key] = \
                 decompose_array(data[key], psize, bbox)
-            grid_dimensions = np.array([grid.shape for grid in temp[key]])
+            grid_dimensions = np.array([grid.shape for grid in temp[key]],
+                                       dtype="int32")
         for gid in range(nprocs):
             new_data[gid] = {}
             for key in temp.keys():
@@ -364,7 +367,7 @@
         sfh.update({0:data})
         grid_left_edges = domain_left_edge
         grid_right_edges = domain_right_edge
-        grid_dimensions = domain_dimensions.reshape(nprocs,3)
+        grid_dimensions = domain_dimensions.reshape(nprocs,3).astype("int32")
 
     handler = StreamHandler(
         grid_left_edges,
@@ -394,3 +397,168 @@
     for unit in mpc_conversion.keys():
         spf.units[unit] = mpc_conversion[unit] * box_in_mpc
     return spf
+
+def load_amr_grids(grid_data, domain_dimensions, sim_unit_to_cm, bbox=None,
+                   sim_time=0.0, number_of_particles=0):
+    r"""Load a set of grids of data into yt as a
+    :class:`~yt.frontends.stream.data_structures.StreamHandler`.
+
+    This should allow a sequence of grids of varying resolution of data to be
+    loaded directly into yt and analyzed as would any others.  This comes with
+    several caveats:
+        * Units will be incorrect unless the data has already been converted to
+          cgs.
+        * Some functions may behave oddly, and parallelism will be
+          disappointing or non-existent in most cases.
+        * Particles may be difficult to integrate.
+        * No consistency checks are performed on the hierarchy
+
+    Parameters
+    ----------
+    grid_data : list of dicts
+        This is a list of dicts.  Each dict must have entries "left_edge",
+        "right_edge", "dimensions", "level", and then any remaining entries are
+        assumed to be fields.  The dicts are modified in place, so the input
+        cannot be assumed to remain static.
+    domain_dimensions : array_like
+        This is the domain dimensions of the grid
+    sim_unit_to_cm : float
+        Conversion factor from simulation units to centimeters
+    bbox : array_like (xdim:zdim, LE:RE), optional
+        Size of computational domain in units of sim_unit_to_cm
+    sim_time : float, optional
+        The simulation time in seconds
+    number_of_particles : int, optional
+        If particle fields are included, set this to the number of particles
+
+    Examples
+    --------
+
+    >>> grid_data = [
+    ...     dict(left_edge = [0.0, 0.0, 0.0],
+    ...          right_edge = [1.0, 1.0, 1.],
+    ...          level = 0,
+    ...          dimensions = [32, 32, 32]),
+    ...     dict(left_edge = [0.25, 0.25, 0.25],
+    ...          right_edge = [0.75, 0.75, 0.75],
+    ...          level = 1,
+    ...          dimensions = [32, 32, 32])
+    ... ]
+    ... 
+    >>> for g in grid_data:
+    ...     g["Density"] = np.random.random(g["dimensions"]) * 2**g["level"]
+    ...
+    >>> pf = load_amr_grids(grid_data, [32, 32, 32], 1.0)
+    """
+
+    domain_dimensions = np.array(domain_dimensions)
+    ngrids = len(grid_data)
+    if bbox is None:
+        bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], 'float64')
+    domain_left_edge = np.array(bbox[:, 0], 'float64')
+    domain_right_edge = np.array(bbox[:, 1], 'float64')
+    grid_levels = np.zeros((ngrids, 1), dtype='int32')
+    grid_left_edges = np.zeros((ngrids, 3), dtype="float64")
+    grid_right_edges = np.zeros((ngrids, 3), dtype="float64")
+    grid_dimensions = np.zeros((ngrids, 3), dtype="int32")
+    sfh = StreamDictFieldHandler()
+    for i, g in enumerate(grid_data):
+        grid_left_edges[i,:] = g.pop("left_edge")
+        grid_right_edges[i,:] = g.pop("right_edge")
+        grid_dimensions[i,:] = g.pop("dimensions")
+        grid_levels[i,:] = g.pop("level")
+        sfh[i] = g
+
+    handler = StreamHandler(
+        grid_left_edges,
+        grid_right_edges,
+        grid_dimensions,
+        grid_levels,
+        None, # parent_ids is none
+        number_of_particles*np.ones(ngrids, dtype='int64').reshape(ngrids,1),
+        np.zeros(ngrids).reshape((ngrids,1)),
+        sfh,
+    )
+
+    handler.name = "AMRGridData"
+    handler.domain_left_edge = domain_left_edge
+    handler.domain_right_edge = domain_right_edge
+    handler.refine_by = 2
+    handler.dimensionality = 3
+    handler.domain_dimensions = domain_dimensions
+    handler.simulation_time = sim_time
+    handler.cosmology_simulation = 0
+
+    spf = StreamStaticOutput(handler)
+    spf.units["cm"] = sim_unit_to_cm
+    spf.units['1'] = 1.0
+    spf.units["unitary"] = 1.0
+    box_in_mpc = sim_unit_to_cm / mpc_conversion['cm']
+    for unit in mpc_conversion.keys():
+        spf.units[unit] = mpc_conversion[unit] * box_in_mpc
+    return spf
+
+def refine_amr(base_pf, refinement_criteria, fluid_operators, max_level,
+               callback = None):
+    r"""Given a base parameter file, repeatedly apply refinement criteria and
+    fluid operators until a maximum level is reached.
+
+    Parameters
+    ----------
+    base_pf : StaticOutput
+        This is any static output.  It can also be a stream static output, for
+        instance as returned by load_uniform_grid.
+    refinement_criteria : list of :class:`~yt.utilities.flagging_methods.FlaggingMethod`
+        These criteria will be applied in sequence to identify cells that need
+        to be refined.
+    fluid_operators : list of :class:`~yt.utilities.initial_conditions.FluidOperator`
+        These fluid operators will be applied in sequence to all resulting
+        grids.
+    max_level : int
+        The maximum level to which the data will be refined
+    callback : function, optional
+        A function that will be called at the beginning of each refinement
+        cycle, with the current parameter file.
+
+    Examples
+    --------
+    >>> domain_dims = (32, 32, 32)
+    >>> data = np.zeros(domain_dims) + 0.25
+    >>> fo = [ic.CoredSphere(0.05, 0.3, [0.7,0.4,0.75], {"Density": (0.25, 100.0)})]
+    >>> rc = [fm.flagging_method_registry["overdensity"](8.0)]
+    >>> ug = load_uniform_grid({'Density': data}, domain_dims, 1.0)
+    >>> pf = refine_amr(ug, rc, fo, 5)
+    """
+    last_gc = base_pf.h.num_grids
+    cur_gc = -1
+    pf = base_pf    
+    while pf.h.max_level < max_level and last_gc != cur_gc:
+        mylog.info("Refining another level.  Current max level: %s",
+                  pf.h.max_level)
+        last_gc = pf.h.grids.size
+        for m in fluid_operators: m.apply(pf)
+        if callback is not None: callback(pf)
+        grid_data = []
+        for g in pf.h.grids:
+            gd = dict( left_edge = g.LeftEdge,
+                       right_edge = g.RightEdge,
+                       level = g.Level,
+                       dimensions = g.ActiveDimensions )
+            for field in pf.h.field_list:
+                gd[field] = g[field]
+            grid_data.append(gd)
+            if g.Level < pf.h.max_level: continue
+            fg = FlaggingGrid(g, refinement_criteria)
+            nsg = fg.find_subgrids()
+            for sg in nsg:
+                LE = sg.left_index * g.dds
+                dims = sg.dimensions * pf.refine_by
+                grid = pf.h.smoothed_covering_grid(g.Level + 1, LE, dims)
+                gd = dict(left_edge = LE, right_edge = grid.right_edge,
+                          level = g.Level + 1, dimensions = dims)
+                for field in pf.h.field_list:
+                    gd[field] = grid[field]
+                grid_data.append(gd)
+        pf = load_amr_grids(grid_data, pf.domain_dimensions, 1.0)
+        cur_gc = pf.h.num_grids
+    return pf
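
The refine_amr docstring example elides its imports; a self-contained version might look like the following, assuming the helpers live in yt.utilities.flagging_methods and yt.utilities.initial_conditions (the FlaggingGrid import above points at the former):

    import numpy as np
    from yt.frontends.stream.api import load_uniform_grid, refine_amr
    import yt.utilities.flagging_methods as fm
    import yt.utilities.initial_conditions as ic  # assumed module path

    domain_dims = (32, 32, 32)
    data = np.zeros(domain_dims) + 0.25
    fo = [ic.CoredSphere(0.05, 0.3, [0.7, 0.4, 0.75],
                         {"Density": (0.25, 100.0)})]
    rc = [fm.flagging_method_registry["overdensity"](8.0)]
    ug = load_uniform_grid({'Density': data}, domain_dims, 1.0)
    pf = refine_amr(ug, rc, fo, 5)   # refine to level 5 or until converged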


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -23,6 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+import __builtin__
 import time, types, signal, inspect, traceback, sys, pdb, os
 import contextlib
 import warnings, struct, subprocess
@@ -310,7 +311,8 @@
     maxval = max(maxval, 1)
     from yt.config import ytcfg
     if ytcfg.getboolean("yt", "suppressStreamLogging") or \
-       ytcfg.getboolean("yt", "ipython_notebook"):
+       "__IPYTHON__" in dir(__builtin__) or \
+       ytcfg.getboolean("yt", "__withintesting"):
         return DummyProgressBar()
     elif ytcfg.getboolean("yt", "__withinreason"):
         from yt.gui.reason.extdirect_repl import ExtProgressBar


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -58,7 +58,7 @@
 from yt.data_objects.api import \
     BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \
     data_object_registry, \
-    derived_field, add_field, FieldInfo, \
+    derived_field, add_field, add_grad, FieldInfo, \
     ValidateParameter, ValidateDataField, ValidateProperty, \
     ValidateSpatial, ValidateGridType, \
     TimeSeriesData, AnalysisTask, analysis_task, \


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -24,7 +24,22 @@
 
 import numpy as np
 from yt.funcs import *
-from numpy.testing import assert_array_equal
+from numpy.testing import assert_array_equal, assert_almost_equal, \
+    assert_approx_equal, assert_array_almost_equal, assert_equal, \
+    assert_array_less, assert_string_equal, assert_array_almost_equal_nulp,\
+    assert_allclose
+
+def assert_rel_equal(a1, a2, decimals):
+    # We have nan checks in here because occasionally we have fields that get
+    # weighted without non-zero weights.  I'm looking at you, particle fields!
+    if isinstance(a1, np.ndarray):
+        assert(a1.size == a2.size)
+        # Mask out NaNs
+        a1[np.isnan(a1)] = 1.0
+        a2[np.isnan(a2)] = 1.0
+    elif np.isnan(a1) and np.isnan(a2):
+        return True
+    return assert_almost_equal(a1/a2, 1.0, decimals)
 
 def amrspace(extent, levels=7, cells=8):
     """Creates two numpy arrays representing the left and right bounds of 
@@ -127,17 +142,23 @@
 
     return left, right, level
 
-def fake_random_pf(ndims, peak_value = 1.0, fields = ("Density",), negative = False):
+def fake_random_pf(ndims, peak_value = 1.0, fields = ("Density",),
+                   negative = False, nprocs = 1):
     from yt.frontends.stream.api import load_uniform_grid
     if not iterable(ndims):
         ndims = [ndims, ndims, ndims]
     else:
         assert(len(ndims) == 3)
-    if negative:
-        offset = 0.5
-    else:
-        offset = 0.0
+    if not iterable(negative):
+        negative = [negative for f in fields]
+    assert(len(fields) == len(negative))
+    offsets = []
+    for n in negative:
+        if n:
+            offsets.append(0.5)
+        else:
+            offsets.append(0.0)
     data = dict((field, (np.random.random(ndims) - offset) * peak_value)
-                 for field in fields)
-    ug = load_uniform_grid(data, ndims, 1.0)
+                 for field,offset in zip(fields,offsets))
+    ug = load_uniform_grid(data, ndims, 1.0, nprocs = nprocs)
     return ug

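fake_random_pf now accepts a per-field 'negative' sequence and an nprocs decomposition, and assert_rel_equal compares values to a fixed number of decimal places after masking NaNs. A minimal sketch:

    import numpy as np
    from yt.testing import fake_random_pf, assert_rel_equal

    pf = fake_random_pf(16, fields=("Density", "x-velocity"),
                        negative=[False, True], nprocs=4)
    dd = pf.h.all_data()
    assert dd["Density"].min() >= 0.0       # strictly non-negative field
    assert dd["x-velocity"].min() < 0.0     # centered on zero
    assert_rel_equal(np.array([1.0, 2.0]),
                     np.array([1.0, 2.0 + 1e-12]), 10)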

diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/utilities/answer_testing/__init__.py
--- a/yt/utilities/answer_testing/__init__.py
+++ b/yt/utilities/answer_testing/__init__.py
@@ -22,10 +22,3 @@
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
-
-import runner
-import output_tests
-from runner import RegressionTestRunner
-
-from output_tests import RegressionTest, SingleOutputTest, \
-    MultipleOutputTest, YTStaticOutputTest, create_test




diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/utilities/answer_testing/default_tests.py
--- a/yt/utilities/answer_testing/default_tests.py
+++ b/yt/utilities/answer_testing/default_tests.py
@@ -67,3 +67,4 @@
         for field in sorted(self.result):
             for p1, p2 in zip(self.result[field], old_result[field]):
                 self.compare_data_arrays(p1, p2, self.tolerance)
+


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/utilities/answer_testing/framework.py
--- /dev/null
+++ b/yt/utilities/answer_testing/framework.py
@@ -0,0 +1,511 @@
+"""
+Answer Testing using Nose as a starting point
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import logging
+import os
+import hashlib
+import contextlib
+import urllib2
+import cPickle
+
+from nose.plugins import Plugin
+from yt.testing import *
+from yt.config import ytcfg
+from yt.mods import *
+from yt.data_objects.static_output import StaticOutput
+
+
+from yt.utilities.logger import disable_stream_logging
+from yt.utilities.command_line import get_yt_version
+
+mylog = logging.getLogger('nose.plugins.answer-testing')
+run_big_data = False
+
+_latest = "gold001"
+_url_path = "http://yt-answer-tests.s3-website-us-east-1.amazonaws.com/%s_%s"
+
+class AnswerTesting(Plugin):
+    name = "answer-testing"
+
+    def options(self, parser, env=os.environ):
+        super(AnswerTesting, self).options(parser, env=env)
+        parser.add_option("--answer-compare", dest="compare_name",
+            default=_latest, help="The name against which we will compare")
+        parser.add_option("--answer-big-data", dest="big_data",
+            default=False, help="Should we run against big data, too?",
+            action="store_true")
+        parser.add_option("--answer-name", dest="this_name",
+            default=None,
+            help="The name we'll call this set of tests")
+        parser.add_option("--answer-store", dest="store_results",
+            default=False, action="store_true")
+
+    def configure(self, options, conf):
+        super(AnswerTesting, self).configure(options, conf)
+        if not self.enabled:
+            return
+        disable_stream_logging()
+        try:
+            my_hash = get_yt_version()
+        except:
+            my_hash = "UNKNOWN%s" % (time.time())
+        if options.this_name is None: options.this_name = my_hash
+        from yt.config import ytcfg
+        ytcfg["yt","__withintesting"] = "True"
+        AnswerTestingTest.result_storage = \
+            self.result_storage = defaultdict(dict)
+        if options.compare_name == "SKIP":
+            options.compare_name = None
+        if options.compare_name is not None:
+            # Now we grab from our S3 store
+            if options.compare_name == "latest":
+                options.compare_name = _latest
+            AnswerTestingTest.reference_storage = \
+                AnswerTestOpener(options.compare_name)
+        self.answer_name = options.this_name
+        self.store_results = options.store_results
+        global run_big_data
+        run_big_data = options.big_data
+
+    def finalize(self, result):
+        # This is where we dump our result storage up to Amazon, if we are able
+        # to.
+        if self.store_results is False: return
+        import boto
+        from boto.s3.key import Key
+        c = boto.connect_s3()
+        bucket = c.get_bucket("yt-answer-tests")
+        for pf_name in self.result_storage:
+            rs = cPickle.dumps(self.result_storage[pf_name])
+            tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name)) 
+            if tk is not None: tk.delete()
+            k = Key(bucket)
+            k.key = "%s_%s" % (self.answer_name, pf_name)
+            k.set_contents_from_string(rs)
+            k.set_acl("public-read")
+
+class AnswerTestOpener(object):
+    def __init__(self, reference_name):
+        self.reference_name = reference_name
+        self.cache = {}
+
+    def get(self, pf_name, default = None):
+        if pf_name in self.cache: return self.cache[pf_name]
+        url = _url_path % (self.reference_name, pf_name)
+        try:
+            resp = urllib2.urlopen(url)
+            # This is dangerous, but we have a controlled S3 environment
+            data = resp.read()
+            rv = cPickle.loads(data)
+        except urllib2.HTTPError as ex:
+            mylog.warning("Missing %s (%s)", url, ex)
+            raise YTNoOldAnswer(url)
+        self.cache[pf_name] = rv
+        return rv
+
+@contextlib.contextmanager
+def temp_cwd(cwd):
+    oldcwd = os.getcwd()
+    os.chdir(cwd)
+    yield
+    os.chdir(oldcwd)
+
+def can_run_pf(pf_fn):
+    path = ytcfg.get("yt", "test_data_dir")
+    if isinstance(pf_fn, StaticOutput):
+        return AnswerTestingTest.result_storage is not None
+    with temp_cwd(path):
+        try:
+            load(pf_fn)
+        except:
+            return False
+    return AnswerTestingTest.result_storage is not None
+
+def data_dir_load(pf_fn):
+    path = ytcfg.get("yt", "test_data_dir")
+    if isinstance(pf_fn, StaticOutput): return pf_fn
+    with temp_cwd(path):
+        pf = load(pf_fn)
+        pf.h  # instantiate the hierarchy before leaving the data directory
+        return pf
+
+def sim_dir_load(sim_fn, path = None, sim_type = "Enzo",
+                 find_outputs=False):
+    if path is None and not os.path.exists(sim_fn):
+        raise IOError("Simulation file not found: %s" % sim_fn)
+    if os.path.exists(sim_fn) or not path:
+        path = "."
+    with temp_cwd(path):
+        return simulation(sim_fn, sim_type,
+                          find_outputs=find_outputs)
+
+class AnswerTestingTest(object):
+    reference_storage = None
+    prefix = ""
+    def __init__(self, pf_fn):
+        self.pf = data_dir_load(pf_fn)
+
+    def __call__(self):
+        nv = self.run()
+        if self.reference_storage is not None:
+            dd = self.reference_storage.get(self.storage_name)
+            if dd is None: raise YTNoOldAnswer(self.storage_name)
+            ov = dd[self.description]
+            self.compare(nv, ov)
+        else:
+            ov = None
+        self.result_storage[self.storage_name][self.description] = nv
+
+    @property
+    def storage_name(self):
+        if self.prefix != "":
+            return "%s_%s" % (self.prefix, self.pf)
+        return str(self.pf)
+
+    def compare(self, new_result, old_result):
+        raise RuntimeError
+
+    def create_obj(self, pf, obj_type):
+        # obj_type should be tuple of
+        #  ( obj_name, ( args ) )
+        if obj_type is None:
+            return pf.h.all_data()
+        cls = getattr(pf.h, obj_type[0])
+        obj = cls(*obj_type[1])
+        return obj
+
+    @property
+    def sim_center(self):
+        """
+        This returns the center of the domain.
+        """
+        return 0.5*(self.pf.domain_right_edge + self.pf.domain_left_edge)
+
+    @property
+    def max_dens_location(self):
+        """
+        This is a helper function to return the location of the most dense
+        point.
+        """
+        return self.pf.h.find_max("Density")[1]
+
+    @property
+    def entire_simulation(self):
+        """
+        Return an unsorted array of values that cover the entire domain.
+        """
+        return self.pf.h.all_data()
+
+    @property
+    def description(self):
+        obj_type = getattr(self, "obj_type", None)
+        if obj_type is None:
+            oname = "all"
+        else:
+            oname = "_".join((str(s) for s in obj_type))
+        args = [self._type_name, str(self.pf), oname]
+        args += [str(getattr(self, an)) for an in self._attrs]
+        return "_".join(args)
+        
+class FieldValuesTest(AnswerTestingTest):
+    _type_name = "FieldValues"
+    _attrs = ("field", )
+
+    def __init__(self, pf_fn, field, obj_type = None):
+        super(FieldValuesTest, self).__init__(pf_fn)
+        self.obj_type = obj_type
+        self.field = field
+
+    def run(self):
+        obj = self.create_obj(self.pf, self.obj_type)
+        avg = obj.quantities["WeightedAverageQuantity"](self.field,
+                             weight="Ones")
+        (mi, ma), = obj.quantities["Extrema"](self.field)
+        return np.array([avg, mi, ma])
+
+    def compare(self, new_result, old_result):
+        assert_equal(new_result, old_result)
+
+class ProjectionValuesTest(AnswerTestingTest):
+    _type_name = "ProjectionValues"
+    _attrs = ("field", "axis", "weight_field")
+
+    def __init__(self, pf_fn, axis, field, weight_field = None,
+                 obj_type = None):
+        super(ProjectionValuesTest, self).__init__(pf_fn)
+        self.axis = axis
+        self.field = field
+        self.weight_field = weight_field
+        self.obj_type = obj_type
+
+    def run(self):
+        if self.obj_type is not None:
+            obj = self.create_obj(self.pf, self.obj_type)
+        else:
+            obj = None
+        proj = self.pf.h.proj(self.axis, self.field,
+                              weight_field=self.weight_field,
+                              data_source = obj)
+        return proj.field_data
+
+    def compare(self, new_result, old_result):
+        assert(len(new_result) == len(old_result))
+        for k in new_result:
+            assert (k in old_result)
+        for k in new_result:
+            assert_equal(new_result[k], old_result[k])
+
+class PixelizedProjectionValuesTest(AnswerTestingTest):
+    _type_name = "PixelizedProjectionValues"
+    _attrs = ("field", "axis", "weight_field")
+
+    def __init__(self, pf_fn, axis, field, weight_field = None,
+                 obj_type = None):
+        super(PixelizedProjectionValuesTest, self).__init__(pf_fn)
+        self.axis = axis
+        self.field = field
+        self.weight_field = weight_field
+        self.obj_type = obj_type
+
+    def run(self):
+        if self.obj_type is not None:
+            obj = self.create_obj(self.pf, self.obj_type)
+        else:
+            obj = None
+        proj = self.pf.h.proj(self.axis, self.field,
+                              weight_field=self.weight_field,
+                              data_source = obj)
+        frb = proj.to_frb((1.0, 'unitary'), 256)
+        frb[self.field]
+        frb[self.weight_field]
+        d = frb.data
+        d.update( dict( (("%s_sum" % f, proj[f].sum(dtype="float64"))
+                         for f in proj.field_data.keys()) ) )
+        return d
+
+    def compare(self, new_result, old_result):
+        assert(len(new_result) == len(old_result))
+        for k in new_result:
+            assert (k in old_result)
+        for k in new_result:
+            assert_rel_equal(new_result[k], old_result[k], 10)
+
+class GridValuesTest(AnswerTestingTest):
+    _type_name = "GridValues"
+    _attrs = ("field",)
+
+    def __init__(self, pf_fn, field):
+        super(GridValuesTest, self).__init__(pf_fn)
+        self.field = field
+
+    def run(self):
+        hashes = {}
+        for g in self.pf.h.grids:
+            hashes[g.id] = hashlib.md5(g[self.field].tostring()).hexdigest()
+            g.clear_data()
+        return hashes
+
+    def compare(self, new_result, old_result):
+        assert(len(new_result) == len(old_result))
+        for k in new_result:
+            assert (k in old_result)
+        for k in new_result:
+            assert_equal(new_result[k], old_result[k])
+
+class VerifySimulationSameTest(AnswerTestingTest):
+    _type_name = "VerifySimulationSame"
+    _attrs = ()
+
+    def __init__(self, simulation_obj):
+        self.pf = simulation_obj
+
+    def run(self):
+        result = [ds.current_time for ds in self.pf]
+        return result
+
+    def compare(self, new_result, old_result):
+        assert_equal(len(new_result), len(old_result))
+        for i in range(len(new_result)):
+            assert_equal(new_result[i], old_result[i])
+        
+class GridHierarchyTest(AnswerTestingTest):
+    _type_name = "GridHierarchy"
+    _attrs = ()
+
+    def run(self):
+        result = {}
+        result["grid_dimensions"] = self.pf.h.grid_dimensions
+        result["grid_left_edges"] = self.pf.h.grid_left_edge
+        result["grid_right_edges"] = self.pf.h.grid_right_edge
+        result["grid_levels"] = self.pf.h.grid_levels
+        result["grid_particle_count"] = self.pf.h.grid_particle_count
+        return result
+
+    def compare(self, new_result, old_result):
+        for k in new_result:
+            assert_equal(new_result[k], old_result[k])
+
+class ParentageRelationshipsTest(AnswerTestingTest):
+    _type_name = "ParentageRelationships"
+    _attrs = ()
+    def run(self):
+        result = {}
+        result["parents"] = []
+        result["children"] = []
+        for g in self.pf.h.grids:
+            p = g.Parent
+            if p is None:
+                result["parents"].append(None)
+            elif hasattr(p, "id"):
+                result["parents"].append(p.id)
+            else:
+                result["parents"].append([pg.id for pg in p])
+            result["children"].append([c.id for c in g.Children])
+        return result
+
+    def compare(self, new_result, old_result):
+        for newp, oldp in zip(new_result["parents"], old_result["parents"]):
+            assert(newp == oldp)
+        for newc, oldc in zip(new_result["children"], old_result["children"]):
+            assert(newc == oldc)
+
+def requires_outputlog(path = ".", prefix = ""):
+    def ffalse(func):
+        return lambda: None
+    def ftrue(func):
+        @wraps(func)
+        def fyielder(*args, **kwargs):
+            with temp_cwd(path):
+                for t in func(*args, **kwargs):
+                    if isinstance(t, AnswerTestingTest):
+                        t.prefix = prefix
+                    yield t
+        return fyielder
+    if os.path.exists("OutputLog"):
+        return ftrue
+    with temp_cwd(path):
+        if os.path.exists("OutputLog"):
+            return ftrue
+    return ffalse
+
+def requires_pf(pf_fn, big_data = False):
+    def ffalse(func):
+        return lambda: None
+    def ftrue(func):
+        return func
+    if big_data and not run_big_data:
+        return ffalse
+    elif not can_run_pf(pf_fn):
+        return ffalse
+    else:
+        return ftrue
+
+def small_patch_amr(pf_fn, fields):
+    if not can_run_pf(pf_fn): return
+    dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
+    yield GridHierarchyTest(pf_fn)
+    yield ParentageRelationshipsTest(pf_fn)
+    for field in fields:
+        yield GridValuesTest(pf_fn, field)
+        for axis in [0, 1, 2]:
+            for ds in dso:
+                for weight_field in [None, "Density"]:
+                    yield ProjectionValuesTest(
+                        pf_fn, axis, field, weight_field,
+                        ds)
+                yield FieldValuesTest(
+                        pf_fn, field, ds)
+
+def big_patch_amr(pf_fn, fields):
+    if not can_run_pf(pf_fn): return
+    dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
+    yield GridHierarchyTest(pf_fn)
+    yield ParentageRelationshipsTest(pf_fn)
+    for field in fields:
+        yield GridValuesTest(pf_fn, field)
+        for axis in [0, 1, 2]:
+            for ds in dso:
+                for weight_field in [None, "Density"]:
+                    yield PixelizedProjectionValuesTest(
+                        pf_fn, axis, field, weight_field,
+                        ds)
+
+def standard_small_simulation(pf_fn, fields):
+    if not can_run_pf(pf_fn): return
+    dso = [None]
+    yield GridHierarchyTest(pf_fn)
+    yield ParentageRelationshipsTest(pf_fn)
+    for field in fields:
+        yield GridValuesTest(pf_fn, field)
+        for axis in [0, 1, 2]:
+            for ds in dso:
+                for weight_field in [None, "Density"]:
+                    yield ProjectionValuesTest(
+                        pf_fn, axis, field, weight_field,
+                        ds)
+                yield FieldValuesTest(
+                        pf_fn, field, ds)
+                    
+class ShockTubeTest(object):
+    def __init__(self, data_file, solution_file, fields, 
+                 left_edges, right_edges, rtol, atol):
+        self.solution_file = solution_file
+        self.data_file = data_file
+        self.fields = fields
+        self.left_edges = left_edges
+        self.right_edges = right_edges
+        self.rtol = rtol
+        self.atol = atol
+
+    def __call__(self):
+        # Read in the pf
+        pf = load(self.data_file)  
+        exact = self.get_analytical_solution() 
+
+        ad = pf.h.all_data()
+        position = ad['x']
+        for k in self.fields:
+            field = ad[k]
+            for xmin, xmax in zip(self.left_edges, self.right_edges):
+                mask = (position >= xmin)*(position <= xmax)
+                exact_field = np.interp(position[mask], exact['pos'], exact[k]) 
+                # yield test vs analytical solution 
+                yield assert_allclose, field[mask], exact_field, \
+                    self.rtol, self.atol
+
+    def get_analytical_solution(self):
+        # Reads in from file 
+        pos, dens, vel, pres, inte = \
+                np.loadtxt(self.solution_file, unpack=True)
+        exact = {}
+        exact['pos'] = pos
+        exact['Density'] = dens
+        exact['x-velocity'] = vel
+        exact['Pressure'] = pres
+        exact['ThermalEnergy'] = inte
+        return exact
+
+
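
For context, a frontend test module would typically drive this framework
through requires_pf and the generator helpers above.  A minimal sketch
follows; the dataset path and field tuple are illustrative only, not part
of this commit:

    # Hypothetical frontend test module built on the framework above.
    from yt.utilities.answer_testing.framework import \
        requires_pf, small_patch_amr

    _fields = ("Temperature", "Density", "VelocityMagnitude")
    g30 = "IsolatedGalaxy/galaxy0030/galaxy0030"  # example dataset path

    @requires_pf(g30, big_data=True)
    def test_galaxy0030():
        # Each yielded object is callable; when nose invokes it, the result
        # is stored and, if a reference store is configured, compared
        # against the old answer.
        for test in small_patch_amr(g30, _fields):
            yield test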


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/utilities/answer_testing/output_tests.py
--- a/yt/utilities/answer_testing/output_tests.py
+++ b/yt/utilities/answer_testing/output_tests.py
@@ -29,14 +29,12 @@
 # We first create our dictionary of tests to run.  This starts out empty, and
 # as tests are imported it will be filled.
 if "TestRegistry" not in locals():
-    print "Initializing TestRegistry"
     class TestRegistry(dict):
         def __new__(cls, *p, **k):
             if not '_the_instance' in cls.__dict__:
                 cls._the_instance = dict.__new__(cls)
                 return cls._the_instance
 if "test_registry" not in locals():
-    print "Initializing test_registry"
     test_registry = TestRegistry()
 
 # The exceptions we raise, related to the character of the failure.


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -1095,8 +1095,12 @@
                   )
         else:
             from IPython.config.loader import Config
+            import sys
             cfg = Config()
+            # prepend sys.path with current working directory
+            sys.path.insert(0,'')
             IPython.embed(config=cfg,user_ns=local_ns)
+            
 
 class YTMapserverCmd(YTCommand):
     args = ("proj", "field", "weight",


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/utilities/decompose.py
--- a/yt/utilities/decompose.py
+++ b/yt/utilities/decompose.py
@@ -68,9 +68,12 @@
 def evaluate_domain_decomposition(n_d, pieces, ldom):
     """ Evaluate longest to shortest edge ratio
         BEWARE: lots of magic here """
-    ideal_bsize = 3.0 * (pieces * np.product(n_d) ** 2) ** (1.0 / 3.0)
-    bsize = int(np.sum(
-        ldom / np.array(n_d, dtype=np.float64) * np.product(n_d)))
+    eff_dim = (n_d > 1).sum()
+    ideal_bsize = eff_dim * (pieces * np.product(n_d) ** (eff_dim - 1)
+                             ) ** (1.0 / eff_dim)
+    mask = np.where(n_d > 1)
+    nd_arr = np.array(n_d, dtype=np.float64)[mask]
+    bsize = int(np.sum(ldom[mask] / nd_arr * np.product(nd_arr)))
     load_balance = float(np.product(n_d)) / \
         (float(pieces) * np.product((n_d - 1) / ldom + 1))
 
@@ -134,23 +137,15 @@
 
 
 def split_array(tab, psize):
-    """ Split array into px*py*pz subarrays using internal numpy routine. """
-    temp = [np.array_split(array, psize[1], axis=1)
-            for array in np.array_split(tab, psize[2], axis=2)]
-    temp = [item for sublist in temp for item in sublist]
-    temp = [np.array_split(array, psize[0], axis=0) for array in temp]
-    temp = [item for sublist in temp for item in sublist]
-    return temp
-
-
-if __name__ == "__main__":
-
-    NPROC = 12
-    ARRAY = np.zeros((128, 128, 129))
-    BBOX = np.array([[0., 1.0], [-1.5, 1.5], [1.0, 2.5]])
-
-    PROCS = get_psize(np.array(ARRAY.shape), NPROC)
-    LE, RE, DATA = decompose_array(ARRAY, PROCS, BBOX)
-
-    for idx in range(NPROC):
-        print LE[idx, :], RE[idx, :], DATA[idx].shape
+    """ Split array into px*py*pz subarrays. """
+    n_d = np.array(tab.shape, dtype=np.int64)
+    slices = []
+    for i in range(psize[0]):
+        for j in range(psize[1]):
+            for k in range(psize[2]):
+                piece = np.array((i, j, k), dtype=np.int64)
+                lei = n_d * piece / psize
+                rei = n_d * (piece + np.ones(3, dtype=np.int64)) / psize
+                slices.append(np.s_[lei[0]:rei[0], lei[1]:
+                                    rei[1], lei[2]:rei[2]])
+    return [tab[slc] for slc in slices]
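
A quick illustration of the new split_array (shapes chosen arbitrarily):
the slice bounds are lei = n_d * piece / psize and
rei = n_d * (piece + 1) / psize, so a (4, 4, 4) array split with
psize = [2, 1, 1] yields exactly tab[0:2, :, :] and tab[2:4, :, :].

    # Illustrative only; the array shape and psize are arbitrary.
    import numpy as np
    import yt.utilities.decompose as dec

    tab = np.arange(64).reshape((4, 4, 4))
    pieces = dec.split_array(tab, np.array([2, 1, 1]))
    assert len(pieces) == 2
    assert pieces[0].shape == (2, 4, 4)   # tab[0:2, :, :]
    assert pieces[1].shape == (2, 4, 4)   # tab[2:4, :, :]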


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -146,3 +146,37 @@
     def __str__(self):
         return "You must create an API key before uploading.  See " + \
                "https://data.yt-project.org/getting_started.html"
+
+class YTNoFilenamesMatchPattern(YTException):
+    def __init__(self, pattern):
+        self.pattern = pattern
+
+    def __str__(self):
+        return "No filenames were found to match the pattern: " + \
+               "'%s'" % (self.pattern)
+
+class YTNoOldAnswer(YTException):
+    def __init__(self, path):
+        self.path = path
+
+    def __str__(self):
+        return "There is no old answer available.\n" + \
+               str(self.path)
+
+class YTEllipsoidOrdering(YTException):
+    def __init__(self, pf, A, B, C):
+        YTException.__init__(self, pf)
+        self._A = A
+        self._B = B
+        self._C = C
+
+    def __str__(self):
+        return "Must have A>=B>=C"
+
+class EnzoTestOutputFileNonExistent(YTException):
+    def __init__(self, testname):
+        self.testname = testname
+
+    def __str__(self):
+        return "Enzo test output file (OutputLog) not generated for: " + \
+            "'%s'" % (self.testname) + ".\nTest did not complete."


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/utilities/flagging_methods.py
--- a/yt/utilities/flagging_methods.py
+++ b/yt/utilities/flagging_methods.py
@@ -24,15 +24,10 @@
 """
 
 import numpy as np # For modern purposes
+from yt.utilities.lib import grow_flagging_field
 
 flagging_method_registry = {}
 
-def flag_cells(grid, methods):
-    flagged = np.zeros(grid.ActiveDimensions, dtype="bool")
-    for method in methods:
-        flagged |= method(grid)
-    return flagged
-
 class FlaggingMethod(object):
     _skip_add = False
     class __metaclass__(type):
@@ -46,6 +41,144 @@
     def __init__(self, over_density):
         self.over_density = over_density
 
-    def __call__(self, pf, grid):
-        rho = grid["Density"] / (pf.refine_by**grid.Level)
+    def __call__(self, grid):
+        rho = grid["Density"] / (grid.pf.refine_by**grid.Level)
         return (rho > self.over_density)
+
+class FlaggingGrid(object):
+    def __init__(self, grid, methods):
+        self.grid = grid
+        flagged = np.zeros(grid.ActiveDimensions, dtype="bool")
+        for method in methods:
+            flagged |= method(self.grid)
+        self.flagged = grow_flagging_field(flagged)
+        self.subgrids = []
+        self.left_index = grid.get_global_startindex()
+        self.dimensions = grid.ActiveDimensions.copy()
+
+    def find_subgrids(self):
+        if not np.any(self.flagged): return []
+        psg = ProtoSubgrid(self.flagged, self.left_index, self.dimensions)
+        sgl = [psg]
+        index = 0
+        while index < len(sgl):
+            psg = sgl[index]
+            psg.shrink()
+            if psg.dimensions.prod() == 0:
+                sgl[index] = None
+                index += 1
+                continue
+            while not psg.acceptable:
+                new_psgs = []
+                for i, dim in enumerate(np.argsort(psg.dimensions)[::-1]):
+                    new_psgs = psg.find_by_zero_signature(dim)
+                    if len(new_psgs) > 1:
+                        break
+                if len(new_psgs) <= 1:
+                    new_psgs = psg.find_by_second_derivative()
+                psg = new_psgs[0]
+                sgl[index] = psg 
+                sgl.extend(new_psgs[1:])
+                psg.shrink()
+            index += 1
+        return sgl
+
+
+# Much or most of this is directly translated from Enzo
+class ProtoSubgrid(object):
+
+    def __init__(self, flagged_base, left_index, dimensions, offset = (0,0,0)):
+        self.left_index = left_index.copy()
+        self.dimensions = dimensions.copy()
+        self.flagged = flagged_base[offset[0]:offset[0]+dimensions[0],
+                                    offset[1]:offset[1]+dimensions[1],
+                                    offset[2]:offset[2]+dimensions[2]]
+        self.compute_signatures()
+
+    def compute_signatures(self):
+        self.sigs = []
+        for dim in range(3):
+            d1 = (dim + 1) % 3
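+            # After summing over axis d1, two axes remain; the axis left
+            # to collapse is index 1 when dim == 0 and index 0 otherwise,
+            # which is exactly what (dim == 0) evaluates to.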
+            d2 = (dim == 0)
+            self.sigs.append(self.flagged.sum(axis=d1).sum(axis=d2))
+
+    @property
+    def acceptable(self):
+        return float(self.flagged.sum()) / self.flagged.size > 0.2
+
+    def shrink(self):
+        new_ind = []
+        for dim in range(3):
+            sig = self.sigs[dim]
+            new_start = 0
+            while sig[new_start] == 0:
+                new_start += 1
+            new_end = sig.size 
+            while sig[new_end - 1] == 0:
+                new_end -= 1
+            self.dimensions[dim] = new_end - new_start
+            self.left_index[dim] += new_start
+            new_ind.append((new_start, new_end))
+        self.flagged = self.flagged[new_ind[0][0]:new_ind[0][1],
+                                    new_ind[1][0]:new_ind[1][1],
+                                    new_ind[2][0]:new_ind[2][1]]
+        self.compute_signatures()
+
+    def find_by_zero_signature(self, dim):
+        sig = self.sigs[dim]
+        grid_ends = np.zeros((sig.size, 2))
+        ng = 0
+        i = 0
+        while i < sig.size:
+            if sig[i] != 0:
+                grid_ends[ng, 0] = i
+                while i < sig.size and sig[i] != 0:
+                    i += 1
+                grid_ends[ng, 1] = i - 1
+                ng += 1
+            i += 1
+        new_grids = []
+        for si, ei in grid_ends[:ng,:]:
+            li = self.left_index.copy()
+            dims = self.dimensions.copy()
+            li[dim] += si
+            dims[dim] = ei - si
+            offset = [0,0,0]
+            offset[dim] = si
+            new_grids.append(ProtoSubgrid(self.flagged, li, dims, offset))
+        return new_grids
+
+    def find_by_second_derivative(self):
+        max_strength = 0
+        max_axis = -1
+        max_ind = -1
+        for dim in range(3):
+            sig = self.sigs[dim]
+            sd = sig[:-2] - 2.0*sig[1:-1] + sig[2:]
+            center = int((self.flagged.shape[dim] - 1) / 2)
+            strength = zero_strength = 0
+            for i in range(1, sig.size-2):
+                # Note that sd is offset by one
+                if sd[i-1] * sd[i] < 0:
+                    strength = np.abs(sd[i-1] - sd[i])
+                    if strength > zero_strength or \
+                       (strength == zero_strength and np.abs(center - i) < np.abs(zero_cross - i)):
+                        zero_strength = strength
+                        zero_cross = i
+            if zero_strength > max_strength:
+                max_axis = dim
+                max_ind = zero_cross
+        dims = self.dimensions.copy()
+        li = self.left_index.copy()
+        dims[max_axis] = max_ind
+        psg1 = ProtoSubgrid(self.flagged, li, dims)
+        li[max_axis] += max_ind
+        dims[max_axis] = self.dimensions[max_axis] - max_ind
+        offset = np.zeros(3)
+        offset[max_axis] = max_ind
+        psg2 = ProtoSubgrid(self.flagged, li, dims, offset)
+        return [psg1, psg2]
+
+    def __str__(self):
+        return "LI: (%s) DIMS: (%s)" % (self.left_index, self.dimensions)


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/utilities/initial_conditions.py
--- /dev/null
+++ b/yt/utilities/initial_conditions.py
@@ -0,0 +1,80 @@
+"""
+Painting zones in a grid
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+
+class FluidOperator(object):
+    def apply(self, pf):
+        for g in pf.h.grids: self(g)
+
+class TopHatSphere(FluidOperator):
+    def __init__(self, radius, center, fields):
+        self.radius = radius
+        self.center = center
+        self.fields = fields
+        
+    def __call__(self, grid, sub_select = None):
+        r = np.zeros(grid.ActiveDimensions, dtype="float64")
+        for i, ax in enumerate("xyz"):
+            np.add(r, (grid[ax] - self.center[i])**2.0, r)
+        np.sqrt(r, r)
+        ind = (r <= self.radius)
+        if sub_select is not None:
+            ind &= sub_select
+        for field, val in self.fields.iteritems():
+            grid[field][ind] = val
+
+class CoredSphere(FluidOperator):
+    def __init__(self, core_radius, radius, center, fields):
+        self.radius = radius
+        self.center = center
+        self.fields = fields
+        self.core_radius = core_radius
+
+    def __call__(self, grid, sub_select = None):
+        r = np.zeros(grid.ActiveDimensions, dtype="float64")
+        r2 = self.radius**2
+        cr2 = self.core_radius**2
+        for i, ax in enumerate("xyz"):
+            np.add(r, (grid[ax] - self.center[i])**2.0, r)
+        np.maximum(r, cr2, r)
+        ind = (r <= r2)
+        if sub_select is not None:
+            ind &= sub_select
+        for field, (outer_val, inner_val) in self.fields.iteritems():
+            val = ((r[ind] - cr2) / (r2 - cr2))**0.5 * (outer_val - inner_val)
+            grid[field][ind] = val + inner_val
+
+class RandomFluctuation(FluidOperator):
+    def __init__(self, fields):
+        self.fields = fields
+
+    def __call__(self, grid, sub_select = None):
+        if sub_select is None:
+            sub_select = Ellipsis
+        for field, mag in self.fields.iteritems():
+            vals = grid[field][sub_select]
+            rc = 1.0 + (np.random.random(vals.shape) - 0.5) * mag
+            grid[field][sub_select] *= rc
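
These operators compose; a sketch of their intended use, assuming pf is a
loaded parameter file whose grids carry an in-memory "Density" field:

    # Sketch only; pf is any loaded parameter file with a "Density" field.
    from yt.utilities.initial_conditions import \
        TopHatSphere, RandomFluctuation

    TopHatSphere(0.1, [0.5, 0.5, 0.5], {"Density": 100.0}).apply(pf)
    RandomFluctuation({"Density": 0.01}).apply(pf)  # ~1% multiplicative noise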


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/utilities/kdtree/__init__.py
--- a/yt/utilities/kdtree/__init__.py
+++ b/yt/utilities/kdtree/__init__.py
@@ -1,1 +0,0 @@
-from fKDpy import *


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/utilities/kdtree/api.py
--- /dev/null
+++ b/yt/utilities/kdtree/api.py
@@ -0,0 +1,9 @@
+from fKDpy import \
+    chainHOP_tags_dens, \
+    create_tree, \
+    fKD, \
+    find_nn_nearest_neighbors, \
+    free_tree, \
+    find_chunk_nearest_neighbors
+
+


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/utilities/kdtree/test.py
--- a/yt/utilities/kdtree/test.py
+++ /dev/null
@@ -1,58 +0,0 @@
-from Forthon import *
-from fKDpy import *
-import numpy,random
-
-n = 32768
-
-
-fKD.tags = fzeros((64),'i')
-fKD.dist = fzeros((64),'d')
-fKD.pos = fzeros((3,n),'d')
-fKD.nn = 64
-fKD.nparts = n
-fKD.sort = True
-fKD.rearrange = True
-fKD.qv = numpy.array([16./32, 16./32, 16./32])
-
-fp = open('parts.txt','r')
-xpos = []
-ypos = []
-zpos = []
-line = fp.readline()
-while line:
-    line = line.split()
-    xpos.append(float(line[0]))
-    ypos.append(float(line[1]))
-    zpos.append(float(line[2]))
-    line= fp.readline()
-
-fp.close()
-
-
-for k in range(32):
-    for j in range(32):
-        for i in range(32):
-            fKD.pos[0][i + j*32 + k*1024] = float(i)/32 + 1./64 + 0.0001*random.random()
-            fKD.pos[1][i + j*32 + k*1024] = float(j)/32 + 1./64 + 0.0001*random.random()
-            fKD.pos[2][i + j*32 + k*1024] = float(k)/32 + 1./64 + 0.0001*random.random()
-
-            
-
-#print fKD.pos[0][0],fKD.pos[1][0],fKD.pos[2][0]
-
-create_tree()
-
-
-find_nn_nearest_neighbors()
-
-#print 'next'
-
-#fKD.qv = numpy.array([0., 0., 0.])
-
-#find_nn_nearest_neighbors()
-
-
-#print (fKD.tags - 1)
-#print fKD.dist
-
-free_tree()


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/utilities/lib/geometry_utils.pyx
--- a/yt/utilities/lib/geometry_utils.pyx
+++ b/yt/utilities/lib/geometry_utils.pyx
@@ -338,3 +338,47 @@
                     rg[2,i,j,k] = zg[i,j,k] - c[2]
         return rg
 
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def obtain_rv_vec(data):
+    # This is just to let the pointers exist and whatnot.  We can't cdef them
+    # inside conditionals.
+    cdef np.ndarray[np.float64_t, ndim=1] vxf
+    cdef np.ndarray[np.float64_t, ndim=1] vyf
+    cdef np.ndarray[np.float64_t, ndim=1] vzf
+    cdef np.ndarray[np.float64_t, ndim=2] rvf
+    cdef np.ndarray[np.float64_t, ndim=3] vxg
+    cdef np.ndarray[np.float64_t, ndim=3] vyg
+    cdef np.ndarray[np.float64_t, ndim=3] vzg
+    cdef np.ndarray[np.float64_t, ndim=4] rvg
+    cdef np.float64_t bv[3]
+    cdef int i, j, k
+    bulk_velocity = data.get_field_parameter("bulk_velocity")
+    if bulk_velocity is None:
+        bulk_velocity = np.zeros(3)
+    bv[0] = bulk_velocity[0]; bv[1] = bulk_velocity[1]; bv[2] = bulk_velocity[2]
+    if len(data['x-velocity'].shape) == 1:
+        # One dimensional data
+        vxf = data['x-velocity'].astype("float64")
+        vyf = data['y-velocity'].astype("float64")
+        vzf = data['z-velocity'].astype("float64")
+        rvf = np.empty((3, vxf.shape[0]), 'float64')
+        for i in range(vxf.shape[0]):
+            rvf[0, i] = vxf[i] - bv[0]
+            rvf[1, i] = vyf[i] - bv[1]
+            rvf[2, i] = vzf[i] - bv[2]
+        return rvf
+    else:
+        # Three dimensional data
+        vxg = data['x-velocity'].astype("float64")
+        vyg = data['y-velocity'].astype("float64")
+        vzg = data['z-velocity'].astype("float64")
+        rvg = np.empty((3, vxg.shape[0], vxg.shape[1], vxg.shape[2]), 'float64')
+        for i in range(vxg.shape[0]):
+            for j in range(vxg.shape[1]):
+                for k in range(vxg.shape[2]):
+                    rvg[0,i,j,k] = vxg[i,j,k] - bv[0]
+                    rvg[1,i,j,k] = vyg[i,j,k] - bv[1]
+                    rvg[2,i,j,k] = vzg[i,j,k] - bv[2]
+        return rvg
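
obtain_rv_vec honors the bulk_velocity field parameter; a short sketch
using the fake-data helpers from yt.testing (field names match the tests
added below):

    # Sketch: velocities relative to a bulk velocity.
    import numpy as np
    from yt.testing import fake_random_pf
    from yt.utilities.lib import obtain_rv_vec

    pf = fake_random_pf(16, fields=("Density", "x-velocity",
                                    "y-velocity", "z-velocity"))
    dd = pf.h.all_data()
    dd.set_field_parameter("bulk_velocity", np.array([0.1, 0.2, 0.3]))
    vels = obtain_rv_vec(dd)   # shape (3, N), bulk velocity subtracted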


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -233,49 +233,6 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-def obtain_rvec(data):
-    # This is just to let the pointers exist and whatnot.  We can't cdef them
-    # inside conditionals.
-    cdef np.ndarray[np.float64_t, ndim=1] xf
-    cdef np.ndarray[np.float64_t, ndim=1] yf
-    cdef np.ndarray[np.float64_t, ndim=1] zf
-    cdef np.ndarray[np.float64_t, ndim=2] rf
-    cdef np.ndarray[np.float64_t, ndim=3] xg
-    cdef np.ndarray[np.float64_t, ndim=3] yg
-    cdef np.ndarray[np.float64_t, ndim=3] zg
-    cdef np.ndarray[np.float64_t, ndim=4] rg
-    cdef np.float64_t c[3]
-    cdef int i, j, k
-    center = data.get_field_parameter("center")
-    c[0] = center[0]; c[1] = center[1]; c[2] = center[2]
-    if len(data['x'].shape) == 1:
-        # One dimensional data
-        xf = data['x']
-        yf = data['y']
-        zf = data['z']
-        rf = np.empty((3, xf.shape[0]), 'float64')
-        for i in range(xf.shape[0]):
-            rf[0, i] = xf[i] - c[0]
-            rf[1, i] = yf[i] - c[1]
-            rf[2, i] = zf[i] - c[2]
-        return rf
-    else:
-        # Three dimensional data
-        xg = data['x']
-        yg = data['y']
-        zg = data['z']
-        rg = np.empty((3, xg.shape[0], xg.shape[1], xg.shape[2]), 'float64')
-        for i in range(xg.shape[0]):
-            for j in range(xg.shape[1]):
-                for k in range(xg.shape[2]):
-                    rg[0,i,j,k] = xg[i,j,k] - c[0]
-                    rg[1,i,j,k] = yg[i,j,k] - c[1]
-                    rg[2,i,j,k] = zg[i,j,k] - c[2]
-        return rg
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
 def kdtree_get_choices(np.ndarray[np.float64_t, ndim=3] data,
                        np.ndarray[np.float64_t, ndim=1] l_corner,
                        np.ndarray[np.float64_t, ndim=1] r_corner):
@@ -334,3 +291,25 @@
     # Return out unique values
     return best_dim, split, less_ids.view("bool"), greater_ids.view("bool")
 
+
+def grow_flagging_field(oofield):
+    cdef np.ndarray[np.uint8_t, ndim=3] ofield = oofield.astype("uint8")
+    cdef np.ndarray[np.uint8_t, ndim=3] nfield
+    nfield = np.zeros_like(ofield)
+    cdef int i, j, k, ni, nj, nk
+    cdef int oi, oj, ok
+    for ni in range(ofield.shape[0]):
+        for nj in range(ofield.shape[1]):
+            for nk in range(ofield.shape[2]):
+                for oi in range(3):
+                    i = ni + (oi - 1)
+                    if i < 0 or i >= ofield.shape[0]: continue
+                    for oj in range(3):
+                        j = nj + (oj - 1)
+                        if j < 0 or j >= ofield.shape[1]: continue
+                        for ok in range(3):
+                            k = nk + (ok - 1)
+                            if k < 0 or k >= ofield.shape[2]: continue
+                            if ofield[i, j, k] == 1:
+                                nfield[ni, nj, nk] = 1
+    return nfield.astype("bool")
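
grow_flagging_field is a one-cell boolean dilation over all 26 neighbor
directions.  For reference, an equivalent formulation, assuming SciPy is
available:

    # Reference implementation only; assumes scipy is importable.
    import numpy as np
    from scipy.ndimage import binary_dilation

    def grow_flagging_field_reference(ofield):
        # A full 3x3x3 structuring element reproduces the 26-neighbor
        # growth of the Cython version above.
        return binary_dilation(ofield,
                               structure=np.ones((3, 3, 3), dtype=bool))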


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/utilities/lib/tests/test_geometry_utils.py
--- /dev/null
+++ b/yt/utilities/lib/tests/test_geometry_utils.py
@@ -0,0 +1,30 @@
+from yt.testing import *
+from yt.utilities.lib import obtain_rvec, obtain_rv_vec
+
+_fields = ("Density", "x-velocity", "y-velocity", "z-velocity")
+
+def test_obtain_rvec():
+    pf = fake_random_pf(64, nprocs=8, fields=_fields, 
+           negative = [False, True, True, True])
+    
+    dd = pf.h.sphere((0.5,0.5,0.5), 0.2)
+
+    coords = obtain_rvec(dd)
+
+    r = np.sqrt(np.sum(coords*coords,axis=0))
+
+    assert_array_less(r.max(), 0.2)
+
+    assert_array_less(0.0, r.min())
+
+def test_obtain_rv_vec():
+    pf = fake_random_pf(64, nprocs=8, fields=_fields, 
+           negative = [False, True, True, True])
+
+    dd = pf.h.all_data()
+
+    vels = obtain_rv_vec(dd)
+
+    assert_array_equal(vels[0,:], dd['x-velocity'])
+    assert_array_equal(vels[1,:], dd['y-velocity'])
+    assert_array_equal(vels[2,:], dd['z-velocity'])


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -674,3 +674,191 @@
                   [uz*ux*(1-cost)-uy*sint, uz*uy*(1-cost)+ux*sint, cost+uz**2*(1-cost)]])
     
     return R
+
+def get_ortho_basis(normal):
+    xprime = np.cross([0.0,1.0,0.0],normal)
+    if np.sum(np.abs(xprime)) == 0: xprime = np.array([0.0, 0.0, 1.0])
+    yprime = np.cross(normal,xprime)
+    zprime = normal
+    return (xprime, yprime, zprime)
+
+def get_sph_r(coords):
+    # The spherical coordinates radius is simply the magnitude of the
+    # coordinate vector.
+
+    return np.sqrt(np.sum(coords**2, axis=0))
+
+def resize_vector(vector,vector_array):
+    if len(vector_array.shape) == 4:
+        res_vector = np.resize(vector,(3,1,1,1))
+    else:
+        res_vector = np.resize(vector,(3,1))
+    return res_vector
+
+def get_sph_theta(coords, normal):
+    # The angle (theta) with respect to the normal (J), is the arccos
+    # of the dot product of the normal with the normalized coordinate
+    # vector.
+    
+    res_normal = resize_vector(normal, coords)
+
+    tile_shape = [1] + list(coords.shape)[1:]
+    
+    J = np.tile(res_normal,tile_shape)
+
+    JdotCoords = np.sum(J*coords,axis=0)
+    
+    return np.arccos( JdotCoords / np.sqrt(np.sum(coords**2,axis=0)) )
+
+def get_sph_phi(coords, normal):
+    # We have freedom with respect to what axis (xprime) to define
+    # the disk angle. Here I've chosen to use the axis that is
+    # perpendicular to the normal and the y-axis. When normal ==
+    # y-hat, then set xprime = z-hat. With this definition, when
+    # normal == z-hat (as is typical), then xprime == x-hat.
+    #
+    # The angle is then given by the arctan of the ratio of the
+    # yprime-component and the xprime-component of the coordinate 
+    # vector.
+
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_xprime = resize_vector(xprime, coords)
+    res_yprime = resize_vector(yprime, coords)
+
+    tile_shape = [1] + list(coords.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+
+    Px = np.sum(Jx*coords,axis=0)
+    Py = np.sum(Jy*coords,axis=0)
+    
+    return np.arctan2(Py,Px)
+
+def get_cyl_r(coords, normal):
+    # The cross product of the normal (J) with a coordinate vector
+    # gives a vector of magnitude equal to the cylindrical radius.
+
+    res_normal = resize_vector(normal, coords)
+
+    tile_shape = [1] + list(coords.shape)[1:]
+    J = np.tile(res_normal, tile_shape)
+    
+    JcrossCoords = np.cross(J, coords, axisa=0, axisb=0, axisc=0)
+    return np.sqrt(np.sum(JcrossCoords**2, axis=0))
+
+def get_cyl_z(coords, normal):
+    # The dot product of the normal (J) with the coordinate vector 
+    # gives the cylindrical height.
+
+    res_normal = resize_vector(normal, coords)
+    
+    tile_shape = [1] + list(coords.shape)[1:]
+    J = np.tile(res_normal, tile_shape)
+
+    return np.sum(J*coords, axis=0)  
+
+def get_cyl_theta(coords, normal):
+    # This is identical to the spherical phi component
+
+    return get_sph_phi(coords, normal)
+
+
+def get_cyl_r_component(vectors, theta, normal):
+    # The r of a vector is the vector dotted with rhat
+
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+
+    rhat = Jx*np.cos(theta) + Jy*np.sin(theta)
+
+    return np.sum(vectors*rhat,axis=0)
+
+def get_cyl_theta_component(vectors, theta, normal):
+    # The theta component of a vector is the vector dotted with thetahat
+    
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+
+    thetahat = -Jx*np.sin(theta) + Jy*np.cos(theta)
+
+    return np.sum(vectors*thetahat, axis=0)
+
+def get_cyl_z_component(vectors, normal):
+    # The z component of a vector is the vector dotted with zhat
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_zprime = resize_vector(zprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    zhat = np.tile(res_zprime, tile_shape)
+
+    return np.sum(vectors*zhat, axis=0)
+
+def get_sph_r_component(vectors, theta, phi, normal):
+    # The r component of a vector is the vector dotted with rhat
+    
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+    res_zprime = resize_vector(zprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+    Jz = np.tile(res_zprime,tile_shape)
+
+    rhat = Jx*np.sin(theta)*np.cos(phi) + \
+           Jy*np.sin(theta)*np.sin(phi) + \
+           Jz*np.cos(theta)
+
+    return np.sum(vectors*rhat, axis=0)
+
+def get_sph_phi_component(vectors, phi, normal):
+    # The phi component of a vector is the vector dotted with phihat
+
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+
+    phihat = -Jx*np.sin(phi) + Jy*np.cos(phi)
+
+    return np.sum(vectors*phihat, axis=0)
+
+def get_sph_theta_component(vectors, theta, phi, normal):
+    # The theta component of a vector is the vector dotted with thetahat
+    
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+    res_zprime = resize_vector(zprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+    Jz = np.tile(res_zprime,tile_shape)
+    
+    thetahat = Jx*np.cos(theta)*np.cos(phi) + \
+               Jy*np.cos(theta)*np.sin(phi) - \
+               Jz*np.sin(theta)
+
+    return np.sum(vectors*thetahat, axis=0)
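
For the common case normal = [0, 0, 1], these helpers reduce to the
textbook formulas theta = arccos(z/r) and phi = arctan2(y, x), which
allows a quick sanity check (a sketch, not part of the commit):

    # Sanity check for a z-aligned normal; coords has shape (3, N).
    import numpy as np
    from yt.utilities.math_utils import get_sph_theta, get_sph_phi

    coords = np.array([[1.0, 0.0], [0.0, 1.0], [0.0, 0.0]])  # x-hat, y-hat
    normal = [0, 0, 1]
    print get_sph_theta(coords, normal)   # [ pi/2  pi/2 ]
    print get_sph_phi(coords, normal)     # [ 0.    pi/2 ]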


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -581,7 +581,9 @@
                     ncols, size = data.shape
             ncols = self.comm.allreduce(ncols, op=MPI.MAX)
             if ncols == 0:
-                    data = np.zeros(0, dtype=dtype) # This only works for
+                data = np.zeros(0, dtype=dtype) # This only works for
+            elif data is None:
+                data = np.zeros((ncols, 0), dtype=dtype)
             size = data.shape[-1]
             sizes = np.zeros(self.comm.size, dtype='int64')
             outsize = np.array(size, dtype='int64')
@@ -1058,3 +1060,49 @@
                 nextdim = (nextdim + 1) % 3
         return cuts
     
+class GroupOwnership(ParallelAnalysisInterface):
+    def __init__(self, items):
+        ParallelAnalysisInterface.__init__(self)
+        self.num_items = len(items)
+        self.items = items
+        assert(self.num_items >= self.comm.size)
+        self.owned = range(self.comm.size)
+        self.pointer = 0
+        if parallel_capable:
+            communication_system.push_with_ids([self.comm.rank])
+
+    def __del__(self):
+        if parallel_capable:
+            communication_system.pop()
+
+    def inc(self, n = -1):
+        old_item = self.item
+        if n == -1: n = self.comm.size
+        for i in range(n):
+            if self.pointer >= self.num_items - self.comm.size: break
+            self.owned[self.pointer % self.comm.size] += self.comm.size
+            self.pointer += 1
+        if self.item is not old_item:
+            self.switch()
+            
+    def dec(self, n = -1):
+        old_item = self.item
+        if n == -1: n = self.comm.size
+        for i in range(n):
+            if self.pointer == 0: break
+            self.owned[(self.pointer - 1) % self.comm.size] -= self.comm.size
+            self.pointer -= 1
+        if self.item is not old_item:
+            self.switch()
+
+    _last = None
+    @property
+    def item(self):
+        own = self.owned[self.comm.rank]
+        if self._last != own:
+            self._item = self.items[own]
+            self._last = own
+        return self._item
+
+    def switch(self):
+        pass
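
GroupOwnership round-robins a list of items over the ranks of the current
communicator and pushes a per-rank sub-communicator while it is alive.  A
heavily hedged sketch of the intended pattern, where pfs stands for a
hypothetical list of parameter files with at least one entry per rank:

    # Sketch of the intended usage; pfs is a hypothetical list of datasets.
    go = GroupOwnership(pfs)
    for _ in range(go.num_items):
        pf = go.item   # the dataset this rank currently owns
        # ... per-rank analysis of pf goes here ...
        go.inc(1)      # advance ownership by one item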


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/utilities/tests/test_coordinate_conversions.py
--- /dev/null
+++ b/yt/utilities/tests/test_coordinate_conversions.py
@@ -0,0 +1,125 @@
+from yt.testing import *
+from yt.utilities.math_utils import \
+    get_sph_r_component, \
+    get_sph_theta_component, \
+    get_sph_phi_component, \
+    get_cyl_r_component, \
+    get_cyl_z_component, \
+    get_cyl_theta_component, \
+    get_cyl_r, get_cyl_theta, \
+    get_cyl_z, get_sph_r, \
+    get_sph_theta, get_sph_phi
+
+# Randomly generated coordinates in the domain [[-1,1],[-1,1],[-1,1]]
+coords = np.array([[-0.41503037, -0.22102472, -0.55774212],
+                   [ 0.73828247, -0.17913899,  0.64076921],
+                   [ 0.08922066, -0.94254844, -0.61774511],
+                   [ 0.10173242, -0.95789145,  0.16294352],
+                   [ 0.73186508, -0.3109153 ,  0.75728738],
+                   [ 0.8757989 , -0.41475119, -0.57039201],
+                   [ 0.58040762,  0.81969082,  0.46759728],
+                   [-0.89983356, -0.9853683 , -0.38355343]]).T
+
+def test_spherical_coordinate_conversion():
+    normal = [0, 0, 1]
+    real_r =     [ 0.72950559,  0.99384957,  1.13047198,  0.97696269,  
+                   1.09807968,  1.12445067,  1.10788685,  1.38843954]
+    real_theta = [ 2.44113629,  0.87012028,  2.14891444,  1.4032274 ,  
+                   0.80979483,  2.10280198,  1.13507735,  1.85068416]
+    real_phi =   [-2.65224483, -0.23804243, -1.47641858, -1.46498842, 
+                  -0.40172325, -0.4422801 ,  0.95466734, -2.31085392]
+
+    calc_r = get_sph_r(coords)
+    calc_theta = get_sph_theta(coords, normal)
+    calc_phi = get_sph_phi(coords, normal)
+
+    assert_array_almost_equal(calc_r, real_r)
+    assert_array_almost_equal(calc_theta, real_theta)
+    assert_array_almost_equal(calc_phi, real_phi)
+
+    normal = [1, 0, 0]
+    real_theta = [ 2.17598842,  0.73347681,  1.49179079,  1.46647589,  
+                   0.8412984 ,  0.67793705,  1.0193883 ,  2.27586987]
+    real_phi =   [-0.37729951, -2.86898397, -0.99063518, -1.73928995, 
+                   -2.75201227,-0.62870527,  2.08920872, -1.19959244]
+
+    calc_theta = get_sph_theta(coords, normal)
+    calc_phi = get_sph_phi(coords, normal)
+    
+    assert_array_almost_equal(calc_theta, real_theta)
+    assert_array_almost_equal(calc_phi, real_phi)
+
+def test_cylindrical_coordinate_conversion():
+    normal = [0, 0, 1]
+    real_r =     [ 0.47021498,  0.75970506,  0.94676179,  0.96327853,  
+                   0.79516968,  0.96904193,  1.00437346,  1.3344104 ]    
+    real_theta = [-2.65224483, -0.23804243, -1.47641858, -1.46498842, 
+                  -0.40172325, -0.4422801 ,  0.95466734, -2.31085392]
+    real_z =     [-0.55774212,  0.64076921, -0.61774511,  0.16294352,
+                   0.75728738, -0.57039201,  0.46759728, -0.38355343]
+
+    calc_r = get_cyl_r(coords, normal)
+    calc_theta = get_cyl_theta(coords, normal)
+    calc_z = get_cyl_z(coords, normal)
+
+    assert_array_almost_equal(calc_r, real_r)
+    assert_array_almost_equal(calc_theta, real_theta)
+    assert_array_almost_equal(calc_z, real_z)
+
+    normal = [1, 0, 0]
+    real_r =     [ 0.59994016,  0.66533898,  1.12694569,  0.97165149,
+                   0.81862843,  0.70524152,  0.94368441,  1.05738542]
+    real_theta = [-0.37729951, -2.86898397, -0.99063518, -1.73928995, 
+                  -2.75201227, -0.62870527,  2.08920872, -1.19959244]
+    real_z =     [-0.41503037,  0.73828247,  0.08922066,  0.10173242,
+                   0.73186508,  0.8757989 ,  0.58040762, -0.89983356]
+
+    calc_r = get_cyl_r(coords, normal)
+    calc_theta = get_cyl_theta(coords, normal)
+    calc_z = get_cyl_z(coords, normal)
+
+    assert_array_almost_equal(calc_r, real_r)
+    assert_array_almost_equal(calc_theta, real_theta)
+    assert_array_almost_equal(calc_z, real_z)
+
+def test_spherical_coordinate_projections():
+    normal = [0, 0, 1]
+    theta = get_sph_theta(coords, normal)
+    phi = get_sph_phi(coords, normal)
+    zero = np.tile(0,coords.shape[1])
+
+    # Purely radial field
+    vecs = np.array([np.sin(theta)*np.cos(phi), np.sin(theta)*np.sin(phi), np.cos(theta)])
+    assert_array_almost_equal(zero, get_sph_theta_component(vecs, theta, phi, normal))
+    assert_array_almost_equal(zero, get_sph_phi_component(vecs, phi, normal))
+
+    # Purely toroidal field
+    vecs = np.array([-np.sin(phi), np.cos(phi), zero])
+    assert_array_almost_equal(zero, get_sph_theta_component(vecs, theta, phi, normal))
+    assert_array_almost_equal(zero, get_sph_r_component(vecs, theta, phi, normal))
+
+    # Purely poloidal field
+    vecs = np.array([np.cos(theta)*np.cos(phi), np.cos(theta)*np.sin(phi), -np.sin(theta)])
+    assert_array_almost_equal(zero, get_sph_phi_component(vecs, phi, normal))
+    assert_array_almost_equal(zero, get_sph_r_component(vecs, theta, phi, normal))
+
+def test_cylindrical_coordinate_projections():
+    normal = [0, 0, 1]
+    theta = get_cyl_theta(coords, normal)
+    z = get_cyl_z(coords, normal)
+    zero = np.tile(0, coords.shape[1])
+
+    # Purely radial field
+    vecs = np.array([np.cos(theta), np.sin(theta), zero])
+    assert_array_almost_equal(zero, get_cyl_theta_component(vecs, theta, normal))
+    assert_array_almost_equal(zero, get_cyl_z_component(vecs, normal))
+
+    # Purely toroidal field
+    vecs = np.array([-np.sin(theta), np.cos(theta), zero])
+    assert_array_almost_equal(zero, get_cyl_z_component(vecs, normal))
+    assert_array_almost_equal(zero, get_cyl_r_component(vecs, theta, normal))
+
+    # Purely z field
+    vecs = np.array([zero, zero, z])
+    assert_array_almost_equal(zero, get_cyl_theta_component(vecs, theta, normal))
+    assert_array_almost_equal(zero, get_cyl_r_component(vecs, theta, normal))


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/utilities/tests/test_decompose.py
--- /dev/null
+++ b/yt/utilities/tests/test_decompose.py
@@ -0,0 +1,96 @@
+"""
+Test suite for cartesian domain decomposition.
+
+Author: Kacper Kowalik <xarthisius.kk at gmail.com>
+Affiliation: CA UMK
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Kacper Kowalik. All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.testing import assert_array_equal, assert_almost_equal
+import numpy as np
+import yt.utilities.decompose as dec
+
+
+def setup():
+    pass
+
+
+def test_psize_2d():
+    procs = dec.get_psize(np.array([5, 1, 7]), 6)
+    assert_array_equal(procs, np.array([3, 1, 2]))
+    procs = dec.get_psize(np.array([1, 7, 5]), 6)
+    assert_array_equal(procs, np.array([1, 2, 3]))
+    procs = dec.get_psize(np.array([7, 5, 1]), 6)
+    assert_array_equal(procs, np.array([2, 3, 1]))
+
+
+def test_psize_3d():
+    procs = dec.get_psize(np.array([33, 35, 37]), 12)
+    assert_array_equal(procs, np.array([3, 2, 2]))
+
+
+def test_decomposition_2d():
+    array = np.ones((7, 5, 1))
+    bbox = np.array([[-0.7, 0.0], [1.5, 2.0], [0.0, 0.7]])
+    ledge, redge, data = dec.decompose_array(array, np.array([2, 3, 1]), bbox)
+
+    assert_array_equal(data[1].shape, np.array([3, 2, 1]))
+
+    gold_le = np.array([
+                       [-0.7, 1.5, 0.0], [-0.7, 1.6, 0.0],
+                       [-0.7, 1.8, 0.0], [-0.4, 1.5, 0.0],
+                       [-0.4, 1.6, 0.0], [-0.4, 1.8, 0.0]
+                       ])
+    assert_almost_equal(ledge, gold_le, 8)
+
+    gold_re = np.array(
+        [[-0.4, 1.6, 0.7], [-0.4, 1.8, 0.7],
+         [-0.4, 2.0, 0.7], [0.0, 1.6, 0.7],
+         [0.0, 1.8, 0.7], [0.0, 2.0, 0.7]]
+    )
+    assert_almost_equal(redge, gold_re, 8)
+
+
+def test_decomposition_3d():
+    array = np.ones((33, 35, 37))
+    bbox = np.array([[0., 1.0], [-1.5, 1.5], [1.0, 2.5]])
+
+    ledge, redge, data = dec.decompose_array(array, np.array([3, 2, 2]), bbox)
+    assert_array_equal(data[0].shape, np.array([11, 17, 18]))
+
+    gold_le = np.array(
+        [[0.00000, -1.50000, 1.00000], [0.00000, -1.50000, 1.72973],
+         [0.00000, -0.04286, 1.00000], [0.00000, -0.04286, 1.72973],
+         [0.33333, -1.50000, 1.00000], [0.33333, -1.50000, 1.72973],
+         [0.33333, -0.04286, 1.00000], [0.33333, -0.04286, 1.72973],
+         [0.66667, -1.50000, 1.00000], [0.66667, -1.50000, 1.72973],
+         [0.66667, -0.04286, 1.00000], [0.66667, -0.04286, 1.72973]]
+    )
+    assert_almost_equal(ledge, gold_le, 5)
+
+    gold_re = np.array(
+        [[0.33333, -0.04286, 1.72973], [0.33333, -0.04286, 2.50000],
+         [0.33333, 1.50000, 1.72973], [0.33333, 1.50000, 2.50000],
+         [0.66667, -0.04286, 1.72973], [0.66667, -0.04286, 2.50000],
+         [0.66667, 1.50000, 1.72973], [0.66667, 1.50000, 2.50000],
+         [1.00000, -0.04286, 1.72973], [1.00000, -0.04286, 2.50000],
+         [1.00000, 1.50000, 1.72973], [1.00000, 1.50000, 2.50000]]
+    )
+    assert_almost_equal(redge, gold_re, 5)


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/utilities/tests/test_flagging_methods.py
--- a/yt/utilities/tests/test_flagging_methods.py
+++ b/yt/utilities/tests/test_flagging_methods.py
@@ -9,4 +9,4 @@
 def test_over_density():
     od_flag = flagging_method_registry["overdensity"](0.75) 
     criterion = (pf.h.grids[0]["Density"] > 0.75)
-    assert( np.all( od_flag(pf, pf.h.grids[0]) == criterion) )
+    assert( np.all( od_flag(pf.h.grids[0]) == criterion) )


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/utilities/tests/test_kdtrees.py
--- /dev/null
+++ b/yt/utilities/tests/test_kdtrees.py
@@ -0,0 +1,93 @@
+"""
+Unit test the kD trees in yt.
+
+Author: Stephen Skory <s at skory.us>
+Affiliation: U of Colorado
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2008-2011 Stephen Skory.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.testing import *
+
+try:
+    from yt.utilities.kdtree.api import *
+except ImportError:
+    mylog.debug("The Fortran kD-Tree did not import correctly.")
+
+from yt.utilities.spatial import cKDTree
+
+def setup():
+    pass
+
+def test_fortran_tree():
+    r"""This test makes sure that the fortran kdtree is finding the correct
+    nearest neighbors.
+    """
+    # Four points.
+    try:
+        fKD.pos = np.empty((3, 4), dtype='float64', order='F')
+    except NameError:
+        return
+    # Make four points by hand that, in particular, will allow us to test
+    # the periodicity of the kdtree.
+    points = np.array([0.01, 0.5, 0.98, 0.99])
+    fKD.pos[0, :] = points
+    fKD.pos[1, :] = points
+    fKD.pos[2, :] = points
+    fKD.qv = np.empty(3, dtype='float64')
+    fKD.dist = np.empty(4, dtype='float64')
+    fKD.tags = np.empty(4, dtype='int64')
+    fKD.nn = 4
+    fKD.sort = True
+    create_tree(0)
+    # Now we check to make sure that we find the correct nearest neighbors,
+    # which get stored in dist and tags.
+    fKD.qv[:] = 0.999
+    find_nn_nearest_neighbors()
+    # Fix fortran counting.
+    fKD.tags -= 1
+    # Clean up before the tests.
+    free_tree(0)
+    # What the answers should be.
+    dist = np.array([2.43e-04, 3.63e-04, 1.083e-03, 7.47003e-01])
+    tags = np.array([3, 0, 2, 1], dtype='int64')
+    assert_array_almost_equal(fKD.dist, dist)
+    assert_array_equal(fKD.tags, tags)
+
+def test_cython_tree():
+    r"""This test makes sure that the cython kdtree is finding the correct
+    nearest neighbors.
+    """
+    # Four points.
+    pos = np.empty((4, 3), dtype='float64')
+    # Make four points by hand that, in particular, will allow us to test
+    # the periodicity of the kdtree.
+    points = np.array([0.01, 0.5, 0.98, 0.99])
+    pos[:, 0] = points
+    pos[:, 1] = points
+    pos[:, 2] = points
+    kdtree = cKDTree(pos, leafsize = 2)
+    qv = np.array([0.999]*3)
+    res = kdtree.query(qv, 4, period=[1.,1.,1])
+    # What the answers should be.
+    dist = np.array([2.43e-04, 3.63e-04, 1.083e-03, 7.47003e-01])
+    tags = np.array([3, 0, 2, 1], dtype='int64')
+    assert_array_almost_equal(res[0], dist)
+    assert_array_equal(res[1], tags)
+
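
For readers wondering where the gold numbers come from: both trees report squared distances here, and the query point sits near the upper corner of a unit periodic box, so the expected answers follow from a few lines of minimum-image arithmetic (a standalone check, not part of the test suite):

    import numpy as np

    points = np.array([0.01, 0.5, 0.98, 0.99])
    qv = 0.999
    # Minimum-image separation per axis in a box of unit period.
    sep = np.minimum(np.abs(points - qv), 1.0 - np.abs(points - qv))
    # The same separation applies on all three axes.
    d2 = 3.0 * sep**2
    print(np.sort(d2))     # [2.43e-04, 3.63e-04, 1.083e-03, 7.47003e-01]
    print(np.argsort(d2))  # [3, 0, 2, 1], matching the expected tags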


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -29,6 +29,7 @@
     y_dict, \
     axis_names
 from .volume_rendering.api import off_axis_projection
+from yt.data_objects.image_array import ImageArray
 import _MPL
 import numpy as np
 import weakref
@@ -133,8 +134,9 @@
                              self.bounds, int(self.antialias),
                              self._period, int(self.periodic),
                              ).transpose()
-        self[item] = buff
-        return buff
+        ia = ImageArray(buff, info=self._get_info(item))
+        self[item] = ia
+        return ia 
 
     def __setitem__(self, item, val):
         self.data[item] = val
@@ -145,6 +147,28 @@
             if f not in exclude:
                 self[f]
 
+    def _get_info(self, item):
+        info = {}
+        info['data_source'] = self.data_source.__str__()  
+        info['axis'] = self.data_source.axis
+        info['field'] = str(item)
+        info['units'] = self.data_source.pf.field_info[item].get_units()
+        info['xlim'] = self.bounds[:2]
+        info['ylim'] = self.bounds[2:]
+        info['length_to_cm'] = self.data_source.pf['cm']
+        info['projected_units'] = \
+                self.data_source.pf.field_info[item].get_projected_units()
+        info['center'] = self.data_source.center
+        try:
+            info['coord'] = self.data_source.coord
+        except AttributeError:
+            pass
+        try:
+            info['weight_field'] = self.data_source.weight_field
+        except AttributeError:
+            pass
+        return info
+
     def convert_to_pixel(self, coords):
         r"""This function converts coordinates in code-space to pixel-space.
 
@@ -383,8 +407,9 @@
                                self.data_source[item],
                                self.buff_size[0], self.buff_size[1],
                                self.bounds).transpose()
-        self[item] = buff
-        return buff
+        ia = ImageArray(buff, info=self._get_info(item))
+        self[item] = ia
+        return ia 
 
 
 class OffAxisProjectionFixedResolutionBuffer(FixedResolutionBuffer):
@@ -406,7 +431,8 @@
                                    weight=ds.weight_field, volume=ds.volume,
                                    no_ghost=ds.no_ghost, interpolated=ds.interpolated,
                                    north_vector=ds.north_vector)
-        self[item] = buff.swapaxes(0,1)
-        return buff
+        ia = ImageArray(buff.swapaxes(0,1), info=self._get_info(item))
+        self[item] = ia
+        return ia 
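
The `_get_info` hook exists so the buffer can hand a metadata dictionary to `ImageArray` at construction time. The general NumPy pattern for an array that carries such metadata through views is sketched below with a hypothetical `InfoArray`; yt's actual `ImageArray` builds more (e.g. file writers) on top of this idea:

    import numpy as np

    class InfoArray(np.ndarray):
        """An ndarray subclass that carries a metadata dict through views."""
        def __new__(cls, input_array, info=None):
            obj = np.asarray(input_array).view(cls)
            obj.info = info if info is not None else {}
            return obj

        def __array_finalize__(self, obj):
            # Called for views and slices; propagate the metadata.
            if obj is None:
                return
            self.info = getattr(obj, 'info', {})

    buff = InfoArray(np.zeros((800, 800)), info={'field': 'Density'})
    print(buff[:10, :10].info['field'])  # 'Density' -- survives slicing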
 
 


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -379,7 +379,7 @@
                          take_log=True)
     """
     import matplotlib
-    from ._mpl_imports import *
+    from ._mpl_imports import FigureCanvasAgg, FigureCanvasPdf, FigureCanvasPS
 
     # If this is rendered as log, then apply now.
     if take_log:
@@ -420,21 +420,22 @@
     else:
         dpi = None
 
-    if filename[-4:] == '.png':
-        suffix = ''
-    else:
+    suffix = os.path.splitext(filename)[1]
+
+    if suffix == '':
         suffix = '.png'
         filename = "%s%s" % (filename, suffix)
-    mylog.info("Saving plot %s", fn)
+    mylog.info("Saving plot %s", filename)
     if suffix == ".png":
         canvas = FigureCanvasAgg(fig)
     elif suffix == ".pdf":
         canvas = FigureCanvasPdf(fig)
     elif suffix in (".eps", ".ps"):
-        canvas = FigureCanvasPS
+        canvas = FigureCanvasPS(fig)
     else:
         mylog.warning("Unknown suffix %s, defaulting to Agg", suffix)
         canvas = FigureCanvasAgg(fig)
+
     canvas.print_figure(filename)
     return filename
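
The old branch only recognized '.png', instantiated FigureCanvasPS without a figure, and logged an undefined `fn`; switching to `os.path.splitext` makes the suffix handling uniform. The behavior in isolation:

    import os

    for filename in ("image", "image.png", "image.pdf", "image.eps"):
        suffix = os.path.splitext(filename)[1]
        if suffix == '':
            suffix = '.png'
            filename = "%s%s" % (filename, suffix)
        print(filename, suffix)
    # image.png .png / image.png .png / image.pdf .pdf / image.eps .eps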
 


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -7,6 +7,8 @@
 Affiliation: UC Berkeley
 Author: Stephen Skory <s at skory.us>
 Affiliation: UC San Diego
+Author: Anthony Scopatz <scopatz at gmail.com>
+Affiliation: The University of Chicago
 Homepage: http://yt-project.org/
 License:
   Copyright (C) 2008-2011 Matthew Turk, JS Oishi, Stephen Skory.  All Rights Reserved.
@@ -211,7 +213,7 @@
 class ContourCallback(PlotCallback):
     _type_name = "contour"
     def __init__(self, field, ncont=5, factor=4, clim=None,
-                 plot_args = None):
+                 plot_args = None, label = False, label_args = None):
         """
         annotate_contour(self, field, ncont=5, factor=4, take_log=False, clim=None,
                          plot_args = None):
@@ -230,6 +232,10 @@
         self.clim = clim
         if plot_args is None: plot_args = {'colors':'k'}
         self.plot_args = plot_args
+        self.label = label
+        if label_args is None:
+            label_args = {}
+        self.label_args = label_args
 
     def __call__(self, plot):
         x0, x1 = plot.xlim
@@ -288,12 +294,16 @@
             self.clim = (np.log10(self.clim[0]), np.log10(self.clim[1]))
         
         if self.clim is not None: 
-            self.ncont = np.linspace(self.clim[0], self.clim[1], ncont)
+            self.ncont = np.linspace(self.clim[0], self.clim[1], self.ncont)
         
-        plot._axes.contour(xi,yi,zi,self.ncont, **self.plot_args)
+        cset = plot._axes.contour(xi,yi,zi,self.ncont, **self.plot_args)
         plot._axes.set_xlim(xx0,xx1)
         plot._axes.set_ylim(yy0,yy1)
         plot._axes.hold(False)
+        
+        if self.label:
+            plot._axes.clabel(cset, **self.label_args)
+        
 
 class GridBoundaryCallback(PlotCallback):
     _type_name = "grids"
@@ -364,38 +374,23 @@
 
 class StreamlineCallback(PlotCallback):
     _type_name = "streamlines"
-    def __init__(self, field_x, field_y, factor=6.0, nx=16, ny=16,
-                 xstart=(0,1), ystart=(0,1), nsample=256,
-                 start_at_xedge=False, start_at_yedge=False,
-                 plot_args=None):
+    def __init__(self, field_x, field_y, factor = 16,
+                 density = 1, plot_args=None):
         """
-        annotate_streamlines(field_x, field_y, factor=6.0, nx=16, ny=16,
-                             xstart=(0,1), ystart=(0,1), nsample=256,
-                             start_at_xedge=False, start_at_yedge=False,
-                             plot_args=None):
+        annotate_streamlines(field_x, field_y, factor = 16,
+                             density = 1, plot_args=None):
 
         Add streamlines to any plot, using the *field_x* and *field_y*
-        from the associated data, using *nx* and *ny* starting points
-        that are bounded by *xstart* and *ystart*.  To begin
-        streamlines from the left edge of the plot, set
-        *start_at_xedge* to True; for the bottom edge, use
-        *start_at_yedge*.  A line with the qmean vector magnitude will
-        cover 1.0/*factor* of the image.
+        from the associated data, skipping every *factor* datapoints in the
+        same way 'quiver' does.  *density* controls how densely the
+        streamlines are drawn, as in matplotlib's streamplot.
         """
         PlotCallback.__init__(self)
         self.field_x = field_x
         self.field_y = field_y
-        self.xstart = xstart
-        self.ystart = ystart
-        self.nsample = nsample
+        self.bv_x = self.bv_y = 0
         self.factor = factor
-        if start_at_xedge:
-            self.data_size = (1,ny)
-        elif start_at_yedge:
-            self.data_size = (nx,1)
-        else:
-            self.data_size = (nx,ny)
-        if plot_args is None: plot_args = {'color':'k', 'linestyle':'-'}
+        self.dens = density
+        if plot_args is None: plot_args = {}
         self.plot_args = plot_args
         
     def __call__(self, plot):
@@ -404,43 +399,26 @@
         xx0, xx1 = plot._axes.get_xlim()
         yy0, yy1 = plot._axes.get_ylim()
         plot._axes.hold(True)
-        nx = plot.image._A.shape[0]
-        ny = plot.image._A.shape[1]
+        nx = plot.image._A.shape[0] / self.factor
+        ny = plot.image._A.shape[1] / self.factor
         pixX = _MPL.Pixelize(plot.data['px'],
                              plot.data['py'],
                              plot.data['pdx'],
                              plot.data['pdy'],
-                             plot.data[self.field_x],
+                             plot.data[self.field_x] - self.bv_x,
                              int(nx), int(ny),
-                           (x0, x1, y0, y1),)
+                           (x0, x1, y0, y1),).transpose()
         pixY = _MPL.Pixelize(plot.data['px'],
                              plot.data['py'],
                              plot.data['pdx'],
                              plot.data['pdy'],
-                             plot.data[self.field_y],
+                             plot.data[self.field_y] - self.bv_y,
                              int(nx), int(ny),
-                           (x0, x1, y0, y1),)
-        r0 = np.mgrid[self.xstart[0]*nx:self.xstart[1]*nx:self.data_size[0]*1j,
-                      self.ystart[0]*ny:self.ystart[1]*ny:self.data_size[1]*1j]
-        lines = np.zeros((self.nsample, 2, self.data_size[0], self.data_size[1]))
-        lines[0,:,:,:] = r0
-        mag = np.sqrt(pixX**2 + pixY**2)
-        scale = np.sqrt(nx*ny) / (self.factor * mag.mean())
-        dt = 1.0 / (self.nsample-1)
-        for i in range(1,self.nsample):
-            xt = lines[i-1,0,:,:]
-            yt = lines[i-1,1,:,:]
-            ix = np.maximum(np.minimum((xt).astype('int'), nx-1), 0)
-            iy = np.maximum(np.minimum((yt).astype('int'), ny-1), 0)
-            lines[i,0,:,:] = xt + dt * pixX[ix,iy] * scale
-            lines[i,1,:,:] = yt + dt * pixY[ix,iy] * scale
-        # scale into data units
-        lines[:,0,:,:] = lines[:,0,:,:] * (xx1 - xx0) / nx + xx0
-        lines[:,1,:,:] = lines[:,1,:,:] * (yy1 - yy0) / ny + yy0
-        for i in range(self.data_size[0]):
-            for j in range(self.data_size[1]):
-                plot._axes.plot(lines[:,0,i,j], lines[:,1,i,j],
-                                **self.plot_args)
+                           (x0, x1, y0, y1),).transpose()
+        X,Y = (np.linspace(xx0,xx1,nx,endpoint=True),
+                          np.linspace(yy0,yy1,ny,endpoint=True))
+        plot._axes.streamplot(X,Y, pixX, pixY, density = self.dens,
+                              **self.plot_args)
         plot._axes.set_xlim(xx0,xx1)
         plot._axes.set_ylim(yy0,yy1)
         plot._axes.hold(False)
@@ -461,30 +439,6 @@
         plot._axes.set_xlabel(self.label)
         plot._axes.set_ylabel(self.label)
 
-class TimeCallback(PlotCallback):
-    _type_name = "time"
-    def __init__(self, format_code='10.7e'):
-        """
-        This annotates the plot with the current simulation time.
-        For now, the time is displayed in seconds.
-        *format_code* can be optionally set, allowing a custom 
-        c-style format code for the time display.
-        """
-        self.format_code = format_code
-        PlotCallback.__init__(self)
-    
-    def __call__(self, plot):
-        current_time = plot.pf.current_time/plot.pf['Time']
-        timestring = format(current_time,self.format_code)
-        base = timestring[:timestring.find('e')]
-        exponent = timestring[timestring.find('e')+1:]
-        if exponent[0] == '+':
-            exponent = exponent[1:]
-        timestring = r'$t\/=\/'+base+''+r'\times\,10^{'+exponent+r'}\, \rm{s}$'
-        from mpl_toolkits.axes_grid1.anchored_artists import AnchoredText
-        at = AnchoredText(timestring, prop=dict(size=12), frameon=True, loc=4)
-        plot._axes.add_artist(at)
-
 def get_smallest_appropriate_unit(v, pf):
     max_nu = 1e30
     good_u = None
@@ -679,6 +633,9 @@
         y0, y1 = plot.ylim
         xx0, xx1 = plot._axes.get_xlim()
         yy0, yy1 = plot._axes.get_ylim()
+
+        extent = [xx0,xx1,yy0,yy1]
+
         plot._axes.hold(True)
 
         px_index = x_dict[plot.data.axis]
@@ -708,7 +665,7 @@
                              (x0, x1, y0, y1), 0).transpose()
             buff = np.maximum(temp, buff)
         self.rv = plot._axes.contour(buff, len(self.clumps)+1,
-                                     **self.plot_args)
+                                     extent=extent,**self.plot_args)
         plot._axes.hold(False)
 
 class ArrowCallback(PlotCallback):
@@ -778,13 +735,18 @@
         self.plot_args = plot_args
 
     def __call__(self, plot):
-        if len(self.pos) == 3:
+        xx0, xx1 = plot._axes.get_xlim()
+        yy0, yy1 = plot._axes.get_ylim()
+        if np.array(self.pos).shape == (3,):
             pos = (self.pos[x_dict[plot.data.axis]],
                    self.pos[y_dict[plot.data.axis]])
-        else: pos = self.pos
+        elif np.array(self.pos).shape == (2,):
+            pos = self.pos
         x,y = self.convert_to_plot(plot, pos)
         plot._axes.hold(True)
-        plot._axes.plot((x,),(y,),self.marker, **self.plot_args)
+        plot._axes.scatter(x,y, marker = self.marker, **self.plot_args)
+        plot._axes.set_xlim(xx0,xx1)
+        plot._axes.set_ylim(yy0,yy1)
         plot._axes.hold(False)
 
 class SphereCallback(PlotCallback):
@@ -850,6 +812,7 @@
 
     def __call__(self, plot):
         from matplotlib.patches import Circle
+        num = len(self.hop_output[:self.max_number])
         for halo in self.hop_output[:self.max_number]:
             size = halo.get_size()
             if size < self.min_size or size > self.max_size: continue
@@ -866,18 +829,19 @@
             (xi, yi) = (x_dict[plot.data.axis], y_dict[plot.data.axis])
 
             (center_x,center_y) = self.convert_to_plot(plot,(center[xi], center[yi]))
-            cir = Circle((center_x, center_y), radius, fill=False)
+            color = np.ones(3) * (0.4 * (num - halo.id)/ num) + 0.6
+            cir = Circle((center_x, center_y), radius, fill=False, color=color)
             plot._axes.add_patch(cir)
             if self.annotate:
                 if self.print_halo_size:
-                    plot._axes.text(center_x, center_y, "%s" % size,
-                    fontsize=self.font_size)
+                    plot._axes.text(center_x+radius, center_y+radius, "%s" % size,
+                    fontsize=self.font_size, color=color)
                 elif self.print_halo_mass:
-                    plot._axes.text(center_x, center_y, "%s" % halo.total_mass(),
-                    fontsize=self.font_size)
+                    plot._axes.text(center_x+radius, center_y+radius, "%s" % halo.total_mass(),
+                    fontsize=self.font_size, color=color)
                 else:
-                    plot._axes.text(center_x, center_y, "%s" % halo.id,
-                    fontsize=self.font_size)
+                    plot._axes.text(center_x+radius, center_y+radius, "%s" % halo.id,
+                    fontsize=self.font_size, color=color)
 
 class HopParticleCallback(PlotCallback):
     _type_name = "hop_particles"
@@ -1124,3 +1088,170 @@
     def __call__(self,plot):
         plot._axes.set_title(self.title)
 
+class FlashRayDataCallback(PlotCallback):
+    _type_name = "flash_ray_data"
+    def __init__(self, cmap_name='bone', sample=None):
+        """ 
+        annotate_flash_ray_data(cmap_name='bone', sample=None)
+
+        Adds ray trace data to the plot.  *cmap_name* is the name of the color map 
+        ('bone', 'jet', 'hot', etc).  *sample* dictates the amount of down sampling 
+        to do to prevent all of the rays from being  plotted.  This may be None 
+        (plot all rays, default), an integer (step size), or a slice object.
+        """
+        self.cmap_name = cmap_name
+        self.sample = sample if isinstance(sample, slice) else slice(None, None, sample)
+
+    def __call__(self, plot):
+        ray_data = plot.data.pf._handle["RayData"][:]
+        idx = ray_data[:,0].argsort(kind="mergesort")
+        ray_data = ray_data[idx]
+
+        tags = ray_data[:,0]
+        coords = ray_data[:,1:3]
+        power = ray_data[:,4]
+        power /= power.max()
+        cx, cy = self.convert_to_plot(plot, coords.T)
+        coords[:,0], coords[:,1] = cx, cy
+        splitidx = np.argwhere(0 < (tags[1:] - tags[:-1])) + 1
+        coords = np.split(coords, splitidx.flat)[self.sample]
+        power = np.split(power, splitidx.flat)[self.sample]
+        cmap = matplotlib.cm.get_cmap(self.cmap_name)
+
+        plot._axes.hold(True)
+        colors = [cmap(p.max()) for p in power]
+        lc = matplotlib.collections.LineCollection(coords, colors=colors)
+        plot._axes.add_collection(lc)
+        plot._axes.hold(False)
+
+
+class TimestampCallback(PlotCallback):
+    _type_name = "timestamp"
+    _time_conv = {
+          'as': 1e-18,
+          'attosec': 1e-18,
+          'attosecond': 1e-18,
+          'attoseconds': 1e-18,
+          'fs': 1e-15,
+          'femtosec': 1e-15,
+          'femtosecond': 1e-15,
+          'femtoseconds': 1e-15,
+          'ps': 1e-12,
+          'picosec': 1e-12,
+          'picosecond': 1e-12,
+          'picoseconds': 1e-12,
+          'ns': 1e-9,
+          'nanosec': 1e-9,
+          'nanosecond':1e-9,
+          'nanoseconds' : 1e-9,
+          'us': 1e-6,
+          'microsec': 1e-6,
+          'microsecond': 1e-6,
+          'microseconds': 1e-6,
+          'ms': 1e-3,
+          'millisec': 1e-3,
+          'millisecond': 1e-3,
+          'milliseconds': 1e-3,
+          's': 1.0,
+          'sec': 1.0,
+          'second':1.0,
+          'seconds': 1.0,
+          'm': 60.0,
+          'min': 60.0,
+          'minute': 60.0,
+          'minutes': 60.0,
+          'h': 3600.0,
+          'hour': 3600.0,
+          'hours': 3600.0,
+          'd': 86400.0,
+          'day': 86400.0,
+          'days': 86400.0,
+          'y': 86400.0*365.25,
+          'year': 86400.0*365.25,
+          'years': 86400.0*365.25,
+          'ev': 1e-9 * 7.6e-8 / 6.03,
+          'kev': 1e-12 * 7.6e-8 / 6.03,
+          'mev': 1e-15 * 7.6e-8 / 6.03,
+          }
+    _bbox_dict = {'boxstyle': 'square,pad=0.6', 'fc': 'white', 'ec': 'black', 'alpha': 1.0}
+
+    def __init__(self, x, y, units=None, format="{time:.3G} {units}", normalized=False, 
+                 bbox_dict=None, **kwargs):
+        """ 
+        annotate_timestamp(x, y, units=None, format="{time:.3G} {units}",
+                           normalized=False, bbox_dict=None, **kwargs)
+
+        Adds the current time to the plot at the point given by *x* and *y*.  If 
+        *units* is given ('s', 'ms', 'ns', etc), it will convert the time to this 
+        basis.  If *units* is None, it will attempt to figure out the correct 
+        value by which to scale.  The *format* keyword is a template string that 
+        will be evaluated and displayed on the plot.  If *normalized* is true, 
+        *x* and *y* are interpreted as normalized plot coordinates (0,0 is 
+        lower-left and 1,1 is upper-right); otherwise *x* and *y* are assumed to 
+        be in plot coordinates.  The *bbox_dict* is an optional dict of arguments 
+        for the bbox that frames the timestamp; see matplotlib's text annotation 
+        guide for more details.  All other *kwargs* will be passed to the text() 
+        method on the plot axes.  See matplotlib's text() documentation for more 
+        information.
+        """
+        self.x = x
+        self.y = y
+        self.format = format
+        self.units = units
+        self.normalized = normalized
+        if bbox_dict is not None:
+            self.bbox_dict = bbox_dict
+        else:
+            self.bbox_dict = self._bbox_dict
+        self.kwargs = {'color': 'w'}
+        self.kwargs.update(kwargs)
+
+    def __call__(self, plot):
+        if self.units is None:
+            t = plot.data.pf.current_time * plot.data.pf['Time']
+            scale_keys = ['as', 'fs', 'ps', 'ns', 'us', 'ms', 's']
+            self.units = 's'
+            for k in scale_keys:
+                if t < self._time_conv[k]:
+                    break
+                self.units = k
+        t = plot.data.pf.current_time * plot.data.pf['Time'] 
+        t /= self._time_conv[self.units.lower()]
+        if self.units == 'us':
+            self.units = '$\\mu s$'
+        s = self.format.format(time=t, units=self.units)
+        plot._axes.hold(True)
+        if self.normalized:
+            plot._axes.text(self.x, self.y, s, horizontalalignment='center',
+                            verticalalignment='center', 
+                            transform = plot._axes.transAxes, bbox=self.bbox_dict)
+        else:
+            plot._axes.text(self.x, self.y, s, bbox=self.bbox_dict, **self.kwargs)
+        plot._axes.hold(False)
+
+
+class MaterialBoundaryCallback(ContourCallback):
+    _type_name = "material_boundary"
+    def __init__(self, field='targ', ncont=1, factor=4, clim=(0.9, 1.0), **kwargs):
+        """ 
+        annotate_material_boundary(self, field='targ', ncont=1, factor=4, 
+                                   clim=(0.9, 1.0), **kwargs):
+
+        Add the limiting contours of *field* to the plot.  Nominally, *field* is 
+        the target material but may be any other field present in the hierarchy.
+        The number of contours generated is given by *ncont*, *factor* governs 
+        the number of points used in the interpolation, and *clim* gives the 
+        (lower, upper) limits for contouring.  For this to truly be the boundary,
+        *clim* should be close to the edge.  For example, the default is (0.9, 1.0)
+        for 'targ' which is defined on the range [0.0, 1.0].  All other *kwargs* 
+        will be passed to the contour() method on the plot axes.  See matplotlib
+        for more information.
+        """
+        plot_args = {'colors': 'w'}
+        plot_args.update(kwargs)
+        super(MaterialBoundaryCallback, self).__init__(field=field, ncont=ncont,
+                                                       factor=factor, clim=clim,
+                                                       plot_args=plot_args)
+
+    def __call__(self, plot):
+        super(MaterialBoundaryCallback, self).__call__(plot)
+


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -60,6 +60,10 @@
     axis_labels
 from yt.utilities.math_utils import \
     ortho_find
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+    GroupOwnership
+from yt.data_objects.time_series import \
+    TimeSeriesData
 
 def invalidate_data(f):
     @wraps(f)
@@ -209,7 +213,7 @@
     _vector_info = None
     _frb = None
     def __init__(self, data_source, bounds, buff_size=(800,800), antialias=True, 
-                 periodic=True, origin='center-window', oblique=False):
+                 periodic=True, origin='center-window', oblique=False, fontsize=15):
         r"""
         PlotWindow(data_source, bounds, buff_size=(800,800), antialias = True)
         
@@ -239,6 +243,10 @@
             rendering is used during data deposition.
 
         """
+        if not hasattr(self, "pf"):
+            self.pf = data_source.pf
+            ts = self._initialize_dataset(self.pf) 
+            self.ts = ts
         self._initfinished = False
         self.center = None
         self.plots = {}
@@ -249,11 +257,40 @@
         self.antialias = True
         self.set_window(bounds) # this automatically updates the data and plot
         self.origin = origin
+        self.fontsize = fontsize
         if self.data_source.center is not None and oblique == False:
             center = [self.data_source.center[i] for i in range(len(self.data_source.center)) if i != self.data_source.axis]
             self.set_center(center)
         self._initfinished = True
 
+    def _initialize_dataset(self, ts):
+        if not isinstance(ts, TimeSeriesData):
+            if not iterable(ts): ts = [ts]
+            ts = TimeSeriesData(ts)
+        return ts
+
+    def __iter__(self):
+        for pf in self.ts:
+            mylog.warning("Switching to %s", pf)
+            self._switch_pf(pf)
+            yield self
+
+    def piter(self, *args, **kwargs):
+        for pf in self.ts.piter(*args, **kwargs):
+            self._switch_pf(pf)
+            yield self
+
+    def _switch_pf(self, new_pf):
+        ds = self.data_source
+        name = ds._type_name
+        kwargs = dict((n, getattr(ds, n)) for n in ds._con_args)
+        new_ds = getattr(new_pf.h, name)(**kwargs)
+        self.pf = new_pf
+        self.data_source = new_ds
+        self._data_valid = self._plot_valid = False
+        self._recreate_frb()
+        self._setup_plots()
+
     def __getitem__(self, item):
         return self.plots[item]
 
@@ -273,7 +310,6 @@
             self._frb._get_data_source_fields()
         else:
             for key in old_fields: self._frb[key]
-        self.pf = self._frb.pf
         self._data_valid = True
         
     def _setup_plots(self):
@@ -695,6 +731,15 @@
 
     """
     _current_field = None
+    _frb_generator = None
+    _plot_type = None
+
+    def __init__(self, *args, **kwargs):
+        if self._frb_generator == None:
+            self._frb_generator = kwargs.pop("frb_generator")
+        if self._plot_type == None:
+            self._plot_type = kwargs.pop("plot_type")
+        PWViewer.__init__(self, *args, **kwargs)
 
     def _setup_plots(self):
         if self._current_field is not None:
@@ -758,8 +803,10 @@
                 labels = [r'$\rm{Image\/x'+axes_unit_label+'}$',
                           r'$\rm{Image\/y'+axes_unit_label+'}$']
 
-            self.plots[f].axes.set_xlabel(labels[0])
-            self.plots[f].axes.set_ylabel(labels[1])
+            self.plots[f].axes.set_xlabel(labels[0],fontsize=self.fontsize)
+            self.plots[f].axes.set_ylabel(labels[1],fontsize=self.fontsize)
+
+            self.plots[f].axes.tick_params(labelsize=self.fontsize)
 
             field_name = self.data_source.pf.field_info[f].display_name
 
@@ -774,17 +821,18 @@
             except ParseFatalException, err:
                 raise YTCannotParseFieldDisplayName(f,field_name,str(err))
 
-            try:
-                parser.parse(r'$'+md['units']+r'$')
-            except ParseFatalException, err:
-                raise YTCannotParseUnitDisplayName(f, md['units'],str(err))
-
             if md['units'] == None or md['units'] == '':
                 label = field_name
             else:
+                try:
+                    parser.parse(r'$'+md['units']+r'$')
+                except ParseFatalException, err:
+                    raise YTCannotParseUnitDisplayName(f, md['units'],str(err))
                 label = field_name+r'$\/\/('+md['units']+r')$'
 
-            self.plots[f].cb.set_label(label)
+            self.plots[f].cb.set_label(label,fontsize=self.fontsize)
+
+            self.plots[f].cb.ax.tick_params(labelsize=self.fontsize)
 
             self.run_callbacks(f)
 
@@ -845,17 +893,22 @@
         >>> slc.save(mpl_kwargs={'bbox_inches':'tight'})
 
         """
+        names = []
+        if mpl_kwargs is None: mpl_kwargs = {}
         if name == None:
             name = str(self.pf)
-        elif name.endswith('.png'):
-            return v.save(name)
-        if mpl_kwargs is None: mpl_kwargs = {}
+        suffix = os.path.splitext(name)[1]
+        if suffix != '':
+            for k, v in self.plots.iteritems():
+                names.append(v.save(name,mpl_kwargs))
+            return names
         axis = axis_names[self.data_source.axis]
         weight = None
         type = self._plot_type
         if type in ['Projection','OffAxisProjection']:
             weight = self.data_source.weight_field
-        names = []
+        if 'Cutting' in self.data_source.__class__.__name__:
+            type = 'OffAxisSlice'
         for k, v in self.plots.iteritems():
             if axis:
                 n = "%s_%s_%s_%s" % (name, type, axis, k)
@@ -906,7 +959,7 @@
     _frb_generator = FixedResolutionBuffer
 
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
-                 origin='center-window'):
+                 origin='center-window', fontsize=15):
         r"""Creates a slice plot from a parameter file
         
         Given a pf object, an axis to slice along, and a field name
@@ -962,6 +1015,8 @@
              to the bottom-left hand corner of the simulation domain, 'center-domain',
              corresponding the center of the simulation domain, or 'center-window' for 
              the center of the plot window.
+        fontsize : integer
+             The size of the fonts for the axis, colorbar, and tick labels.
              
         Examples
         --------
@@ -973,8 +1028,12 @@
         >>> p.save('sliceplot')
         
         """
+        # This will handle time series data and controllers
+        ts = self._initialize_dataset(pf) 
+        self.ts = ts
+        pf = self.pf = ts[0]
         axis = fix_axis(axis)
-        (bounds,center) = GetBoundsAndCenter(axis, center, width, pf)
+        (bounds, center) = GetBoundsAndCenter(axis, center, width, pf)
         slc = pf.h.slice(axis, center[axis], fields=fields)
         PWViewerMPL.__init__(self, slc, bounds, origin=origin)
         self.set_axes_unit(axes_unit)
@@ -984,7 +1043,7 @@
     _frb_generator = FixedResolutionBuffer
 
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
-                 weight_field=None, max_level=None, origin='center-window'):
+                 weight_field=None, max_level=None, origin='center-window', fontsize=15):
         r"""Creates a projection plot from a parameter file
         
         Given a pf object, an axis to project along, and a field name
@@ -1044,6 +1103,8 @@
             The name of the weighting field.  Set to None for no weight.
         max_level: int
             The maximum level to project to.
+        fontsize : integer
+             The size of the fonts for the axis, colorbar, and tick labels.
         
         Examples
         --------
@@ -1055,8 +1116,11 @@
         >>> p.save('sliceplot')
         
         """
+        ts = self._initialize_dataset(pf) 
+        self.ts = ts
+        pf = self.pf = ts[0]
         axis = fix_axis(axis)
-        (bounds,center) = GetBoundsAndCenter(axis,center,width,pf)
+        (bounds, center) = GetBoundsAndCenter(axis, center, width, pf)
         proj = pf.h.proj(axis,fields,weight_field=weight_field,max_level=max_level,center=center)
         PWViewerMPL.__init__(self,proj,bounds,origin=origin)
         self.set_axes_unit(axes_unit)
@@ -1066,7 +1130,7 @@
     _frb_generator = ObliqueFixedResolutionBuffer
 
     def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
-                 axes_unit=None, north_vector=None):
+                 axes_unit=None, north_vector=None, fontsize=15):
         r"""Creates an off axis slice plot from a parameter file
 
         Given a pf object, a normal vector defining a slicing plane, and
@@ -1103,7 +1167,8 @@
             A vector defining the 'up' direction in the plot.  This
             option sets the orientation of the slicing plane.  If not
             set, an arbitrary grid-aligned north-vector is chosen.
-
+        fontsize : integer
+             The size of the fonts for the axis, colorbar, and tick labels.
         """
         (bounds,center_rot) = GetOffAxisBoundsAndCenter(normal,center,width,pf)
         cutting = pf.h.cutting(normal,center,fields=fields,north_vector=north_vector)
@@ -1142,7 +1207,7 @@
     def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
                  depth=(1,'unitary'), axes_unit=None, weight_field=None, 
                  max_level=None, north_vector=None, volume=None, no_ghost=False, 
-                 le=None, re=None, interpolated=False):
+                 le=None, re=None, interpolated=False, fontsize=15):
         r"""Creates an off axis projection plot from a parameter file
 
         Given a pf object, a normal vector to project along, and
@@ -1220,6 +1285,7 @@
     _ext_widget_id = None
     _current_field = None
     _widget_name = "plot_window"
+    _frb_generator = FixedResolutionBuffer
 
     def _setup_plots(self):
         from yt.gui.reason.bottle_mods import PayloadHandler
@@ -1397,24 +1463,25 @@
             self.cax = self.figure.add_axes(caxrect)
             
     def save(self, name, mpl_kwargs, canvas = None):
-        if name[-4:] == '.png':
-            suffix = ''
+        suffix = os.path.splitext(name)[1]
+        
+        if suffix == '':
+            suffix = '.png'
+            name = "%s%s" % (name, suffix)
+        mylog.info("Saving plot %s", name)
+        if suffix == ".png":
+            canvas = FigureCanvasAgg(self.figure)
+        elif suffix == ".pdf":
+            canvas = FigureCanvasPdf(self.figure)
+        elif suffix in (".eps", ".ps"):
+            canvas = FigureCanvasPS(self.figure)
         else:
-            suffix = '.png'
-        fn = "%s%s" % (name, suffix)
-        mylog.info("Saving plot %s", fn)
-        if canvas is None:
-            if suffix == ".png":
-                canvas = FigureCanvasAgg(self.figure)
-            elif suffix == ".pdf":
-                canvas = FigureCanvasPdf(self.figure)
-            elif suffix in (".eps", ".ps"):
-                canvas = FigureCanvasPS
-            else:
-                mylog.warning("Unknown suffix %s, defaulting to Agg", suffix)
-                canvas = FigureCanvasAgg(self.figure)
-        canvas.print_figure(fn,**mpl_kwargs)
-        return fn
+            mylog.warning("Unknown suffix %s, defaulting to Agg", suffix)
+            canvas = FigureCanvasAgg(self.figure)
+
+
+        canvas.print_figure(name,**mpl_kwargs)
+        return name
 
     def _get_best_layout(self, size):
         aspect = 1.0*size[0]/size[1]
@@ -1476,4 +1543,3 @@
                                       norm = norm, vmin = self.zmin, 
                                       vmax = self.zmax, cmap = cmap)
         self.image.axes.ticklabel_format(scilimits=(-4,3))
-
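
The net effect of the `_initialize_dataset`/`_switch_pf` machinery is that the plot classes now accept a time series as well as a single parameter file, re-anchoring the data source as they iterate. A sketch of the intended workflow (the filenames are hypothetical; `TimeSeriesData.from_filenames` is the usual constructor):

    from yt.mods import TimeSeriesData, SlicePlot

    ts = TimeSeriesData.from_filenames("DD????/DD????")
    p = SlicePlot(ts, "x", "Density")  # set up against the first output
    for pw in p.piter():               # parallel-safe iteration over outputs
        pw.save()                      # one image per parameter file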


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/visualization/streamlines.py
--- a/yt/visualization/streamlines.py
+++ b/yt/visualization/streamlines.py
@@ -118,7 +118,9 @@
         if length is None:
             length = np.max(self.pf.domain_right_edge-self.pf.domain_left_edge)
         self.length = length
-        self.steps = int(length/dx)
+        self.steps = int(length/dx)+1
+        # Fix up the dx.
+        self.dx = 1.0*self.length/self.steps
         self.streamlines = np.zeros((self.N,self.steps,3), dtype='float64')
         self.magnitudes = None
         if self.get_magnitude:
@@ -146,7 +148,8 @@
     @parallel_passthrough
     def _finalize_parallel(self,data):
         self.streamlines = self.comm.mpi_allreduce(self.streamlines, op='sum')
-        self.magnitudes = self.comm.mpi_allreduce(self.magnitudes, op='sum')
+        if self.get_magnitude:
+            self.magnitudes = self.comm.mpi_allreduce(self.magnitudes, op='sum')
         
     def _integrate_through_brick(self, node, stream, step,
                                  periodic=False, mag=None):
@@ -205,5 +208,6 @@
         >>> matplotlib.pylab.semilogy(stream['t'], stream['Density'], '-x')
         
         """
-        return AMRStreamlineBase(self.streamlines[streamline_id], pf=self.pf)
+        return self.pf.h.streamline(self.streamlines[streamline_id],
+                                    length = self.length)
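
The off-by-one fix matters whenever *length* is not an integer multiple of *dx*: truncation left the final integration short of the requested length, while rounding up and then recomputing *dx* makes steps * dx equal length exactly. For example:

    length, dx = 1.0, 0.3
    steps_old = int(length / dx)      # 3 -> only 0.9 of the path covered
    steps = int(length / dx) + 1      # 4
    dx = 1.0 * length / steps         # 0.25, and 4 * 0.25 == 1.0 exactly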
         


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -1449,7 +1449,7 @@
             yield self.snapshot()
 
 def allsky_projection(pf, center, radius, nside, field, weight = None,
-                      inner_radius = 10, rotation = None):
+                      inner_radius = 10, rotation = None, source = None):
     r"""Project through a parameter file, through an allsky-method
     decomposition from HEALpix, and return the image plane.
 
@@ -1484,6 +1484,9 @@
         If supplied, the vectors will be rotated by this.  You can construct
         this by, for instance, calling np.array([v1,v2,v3]) where those are the
         three reference planes of an orthogonal frame (see ortho_find).
+    source : data container, default None
+        If supplied, this gives the data source from which the all-sky
+        projection pulls its data.
 
     Returns
     -------
@@ -1527,12 +1530,20 @@
     positions += inner_radius * dx * vs
     vs *= radius
     uv = np.ones(3, dtype='float64')
-    grids = pf.h.sphere(center, radius)._grids
+    if source is not None:
+        grids = source._grids
+    else:
+        grids = pf.h.sphere(center, radius)._grids
     sampler = ProjectionSampler(positions, vs, center, (0.0, 0.0, 0.0, 0.0),
                                 image, uv, uv, np.zeros(3, dtype='float64'))
     pb = get_pbar("Sampling ", len(grids))
     for i,grid in enumerate(grids):
-        data = [grid[field] * grid.child_mask.astype('float64')
+        if source is not None:
+            data = [grid[field] * source._get_cut_mask(grid) * \
+                grid.child_mask.astype('float64')
+                for field in fields]
+        else:
+            data = [grid[field] * grid.child_mask.astype('float64')
                 for field in fields]
         pg = PartitionedGrid(
             grid.id, data,



https://bitbucket.org/yt_analysis/yt-3.0/changeset/588ee2163479/
changeset:   588ee2163479
branch:      yt
user:        MatthewTurk
date:        2012-11-14 21:47:28
summary:     Merged in jzuhone/yt (pull request #339)
affected #:  5 files

diff -r b0dcdb34be9ee1eebdab4008ffdea8ca39e58f48 -r 588ee216347993c8b57a2be32f5fa9f2c85e1eb9 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -326,7 +326,12 @@
         refine_by = None
         if refine_by is None: refine_by = 2
         self.refine_by = refine_by
-        self.dimensionality = 3
+        dimensionality = 3
+        if grid['dimensions'][2] == 1 :
+            dimensionality = 2
+        if grid['dimensions'][1] == 1 :
+            dimensionality = 1
+        self.dimensionality = dimensionality
         self.current_time = grid["time"]
         self.unique_identifier = self._handle.__hash__()
         self.cosmological_simulation = False
@@ -334,7 +339,8 @@
         self.field_ordering = 'fortran'
         self.boundary_conditions = [1]*6
 
-        self.nvtk = int(np.product(self.domain_dimensions/(grid['dimensions']-1)))
+        ND = self.dimensionality
+        self.nvtk = int(np.product(self.domain_dimensions[:ND]/(grid['dimensions'][:ND]-1)))
 
         self.current_redshift = self.omega_lambda = self.omega_matter = \
             self.hubble_constant = self.cosmological_simulation = 0.0
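
The dimensionality detection feeds directly into the nvtk count just below it: for a 2D dataset the third entry of grid['dimensions'] is 1, so the old full-3D formula divided by (1 - 1) = 0 on that axis. Restricting the product to the first ND axes avoids this. A worked example with hypothetical numbers:

    import numpy as np

    domain_dimensions = np.array([512, 512, 1])
    dims = np.array([257, 257, 1])   # grid['dimensions']: vertex counts per block
    ND = 2                           # dimensionality, since dims[2] == 1
    nvtk = int(np.product(domain_dimensions[:ND] / (dims[:ND] - 1.0)))
    print(nvtk)  # 4: a 2 x 2 tiling of vtk files covers the domain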


diff -r b0dcdb34be9ee1eebdab4008ffdea8ca39e58f48 -r 588ee216347993c8b57a2be32f5fa9f2c85e1eb9 yt/frontends/athena/fields.py
--- a/yt/frontends/athena/fields.py
+++ b/yt/frontends/athena/fields.py
@@ -26,6 +26,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+import numpy as np
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, \
     FieldInfo, \
@@ -36,6 +37,8 @@
     ValidateGridType, \
     NullFunc, \
     TranslationFunc
+from yt.utilities.physical_constants import \
+    kboltz,mh
 import yt.data_objects.universal_fields
 
 log_translation_dict = {}
@@ -44,10 +47,7 @@
                     "Pressure": "pressure",
                     "x-velocity": "velocity_x",
                     "y-velocity": "velocity_y",
-                    "z-velocity": "velocity_z",
-                    "mag_field_x": "cell_centered_B_x ",
-                    "mag_field_y": "cell_centered_B_y ",
-                    "mag_field_z": "cell_centered_B_z "}
+                    "z-velocity": "velocity_z"}
 
 AthenaFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
 add_field = AthenaFieldInfo.add_field
@@ -86,3 +86,30 @@
 for f,v in translation_dict.items():
     add_field(f, TranslationFunc(v), take_log=False)
 
+def _Temperature(fields, data):
+    if data.has_field_parameter("mu") :
+        mu = data.get_field_parameter("mu")
+    else:
+        mu = 0.6
+    return mu*mh*data["Pressure"]/data["Density"]/kboltz
+add_field("Temperature", function=_Temperature, take_log=False,
+          units=r"\rm{K}")
+
+def _Bx(fields, data):
+    factor = np.sqrt(4.*np.pi)
+    return data['cell_centered_B_x']*factor
+add_field("Bx", function=_Bx, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_x")
+
+def _By(fields, data):
+    factor = np.sqrt(4.*np.pi)
+    return data['cell_centered_B_y']*factor
+add_field("By", function=_By, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_y")
+
+def _Bz(fields, data):
+    factor = np.sqrt(4.*np.pi)
+    return data['cell_centered_B_z']*factor
+add_field("Bz", function=_Bz, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_z")
+
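
With the three components now defined in Gauss, further derived quantities follow the same add_field pattern. For instance, a magnitude field might look like this (a sketch mirroring the definitions above, not part of the commit):

    def _Bmag(fields, data):
        return np.sqrt(data['Bx']**2 + data['By']**2 + data['Bz']**2)
    add_field("Bmag", function=_Bmag, take_log=False,
              units=r"\rm{Gauss}", display_name=r"|B|")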


diff -r b0dcdb34be9ee1eebdab4008ffdea8ca39e58f48 -r 588ee216347993c8b57a2be32f5fa9f2c85e1eb9 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -36,7 +36,7 @@
     ValidateGridType
 import yt.data_objects.universal_fields
 from yt.utilities.physical_constants import \
-    kboltz
+    kboltz, mh
 KnownFLASHFields = FieldInfoContainer()
 add_flash_field = KnownFLASHFields.add_field
 







https://bitbucket.org/yt_analysis/yt-3.0/changeset/881227d39ae8/
changeset:   881227d39ae8
branch:      yt
user:        jzuhone
date:        2012-11-14 21:42:05
summary:     Determining top-level domain dimensions in FLASH. PARAMESH uses nblockx, nblocky, nblockz to determine the number of blocks on a side. Uniform Grid uses iprocs, jprocs, kprocs. Previously we weren't checking for the latter possibility. The only way I can think of at the moment to distinguish between the two is to check for a parameter that would be part of PARAMESH and not Uniform Grid, such as 'lrefine_min'.
affected #:  2 files

diff -r e2d94c653c5ac243017f361ff4d20fb1c0522e57 -r 881227d39ae813b04ccca2c3a29b0100d14c586b setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,9 +1,3 @@
 [egg_info]
 #tag_build = .dev
 #tag_svn_revision = 1
-
-[nosetests]
-detailed-errors=1
-where=yt
-exclude=answer_testing
-with-xunit=1


diff -r e2d94c653c5ac243017f361ff4d20fb1c0522e57 -r 881227d39ae813b04ccca2c3a29b0100d14c586b yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -397,9 +397,15 @@
             if dimensionality < 3:
                 mylog.warning("Guessing dimensionality as %s", dimensionality)
 
-        nblockx = self.parameters["nblockx"]
-        nblocky = self.parameters["nblocky"]
-        nblockz = self.parameters["nblockz"]
+        if 'lrefine_min' in self.parameters.keys() : # PARAMESH
+            nblockx = self.parameters["nblockx"]
+            nblocky = self.parameters["nblocky"]
+            nblockz = self.parameters["nblockz"]
+        else : # Uniform Grid
+            nblockx = self.parameters["iprocs"]
+            nblocky = self.parameters["jprocs"]
+            nblockz = self.parameters["kprocs"]
+                        
         self.dimensionality = dimensionality
         self.domain_dimensions = \
             np.array([nblockx*nxb,nblocky*nyb,nblockz*nzb])
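
The heuristic works because lrefine_min is defined by PARAMESH but not by the Uniform Grid, and the chosen block counts then set the top-level domain dimensions. With hypothetical numbers:

    import numpy as np

    parameters = {"iprocs": 4, "jprocs": 2, "kprocs": 2}  # a Uniform Grid run
    nxb = nyb = nzb = 16                                  # zones per block
    if 'lrefine_min' in parameters:  # PARAMESH
        nblockx, nblocky, nblockz = (parameters[k]
            for k in ("nblockx", "nblocky", "nblockz"))
    else:                            # Uniform Grid
        nblockx, nblocky, nblockz = (parameters[k]
            for k in ("iprocs", "jprocs", "kprocs"))
    print(np.array([nblockx*nxb, nblocky*nyb, nblockz*nzb]))  # [64 32 32]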



https://bitbucket.org/yt_analysis/yt-3.0/changeset/93406bd39d0e/
changeset:   93406bd39d0e
branch:      yt
user:        jzuhone
date:        2012-11-14 21:43:54
summary:     Fixing unintentional edit of setup.cfg
affected #:  1 file

diff -r 881227d39ae813b04ccca2c3a29b0100d14c586b -r 93406bd39d0e2dce7a4ecfb80f30c36dd5b28062 setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,3 +1,9 @@
 [egg_info]
 #tag_build = .dev
 #tag_svn_revision = 1
+
+[nosetests]
+detailed-errors=1
+where=yt
+exclude=answer_testing
+with-xunit=1
\ No newline at end of file



https://bitbucket.org/yt_analysis/yt-3.0/changeset/8b9074656a51/
changeset:   8b9074656a51
branch:      yt
user:        jzuhone
date:        2012-11-14 21:44:46
summary:     Fixing unintentional edit of setup.cfg
affected #:  1 file

diff -r 93406bd39d0e2dce7a4ecfb80f30c36dd5b28062 -r 8b9074656a5119b90f3b2f8996b1ab172aa0d82e setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -6,4 +6,4 @@
 detailed-errors=1
 where=yt
 exclude=answer_testing
-with-xunit=1
\ No newline at end of file
+with-xunit=1



https://bitbucket.org/yt_analysis/yt-3.0/changeset/917e70c630b3/
changeset:   917e70c630b3
branch:      yt
user:        MatthewTurk
date:        2012-11-14 21:49:53
summary:     Merged in jzuhone/yt-flash_fixes (pull request #341)
affected #:  2 files



diff -r 588ee216347993c8b57a2be32f5fa9f2c85e1eb9 -r 917e70c630b36db1f9a821023a3a26b4c9454ac9 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -397,9 +397,15 @@
             if dimensionality < 3:
                 mylog.warning("Guessing dimensionality as %s", dimensionality)
 
-        nblockx = self.parameters["nblockx"]
-        nblocky = self.parameters["nblocky"]
-        nblockz = self.parameters["nblockz"]
+        if 'lrefine_min' in self.parameters.keys() : # PARAMESH
+            nblockx = self.parameters["nblockx"]
+            nblocky = self.parameters["nblocky"]
+            nblockz = self.parameters["nblockz"]
+        else : # Uniform Grid
+            nblockx = self.parameters["iprocs"]
+            nblocky = self.parameters["jprocs"]
+            nblockz = self.parameters["kprocs"]
+                        
         self.dimensionality = dimensionality
         self.domain_dimensions = \
             np.array([nblockx*nxb,nblocky*nyb,nblockz*nzb])



https://bitbucket.org/yt_analysis/yt-3.0/changeset/8c04bce75238/
changeset:   8c04bce75238
branch:      yt
user:        samskillman
date:        2012-11-14 18:13:00
summary:     Restoring the answer-name functionality, and changing the default hash so
that it can be overridden (by Enzo testing) to use the Enzo hash rather than the yt
hash.  Also putting the local storage shelve data inside the hash/answer-name
directory: instead of having hash/, hash.dir, hash.bak, and hash.dat all in the
storage directory, hash.dir, hash.bak, and hash.dat now live inside hash/.
affected #:  1 file

diff -r 6021af872bc06755516426d6fc8f3acc8e49c81d -r 8c04bce75238344c4c47282b81a4949b86326c01 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -49,6 +49,7 @@
 
 class AnswerTesting(Plugin):
     name = "answer-testing"
+    _my_version = None
 
     def options(self, parser, env=os.environ):
         super(AnswerTesting, self).options(parser, env=env)
@@ -65,16 +66,26 @@
         parser.add_option("--local-store", dest="store_local_results",
             default=False, action="store_true", help="Store/Load local results?")
 
+    @property
+    def my_version(self, version=None):
+        if self._my_version is None:
+            if version is not None:
+                self._my_version = version
+            else:
+                try:
+                    self._my_version = get_yt_version()
+                except:
+                    self._my_version = "UNKNOWN%s" % (time.time())
+        else:
+            return self._my_version
+
     def configure(self, options, conf):
         super(AnswerTesting, self).configure(options, conf)
         if not self.enabled:
             return
         disable_stream_logging()
-        try:
-            my_hash = get_yt_version()
-        except:
-            my_hash = "UNKNOWN%s" % (time.time())
-        if options.this_name is None: options.this_name = my_hash
+        if options.this_name is None: 
+            options.this_name = self.my_version
         from yt.config import ytcfg
         ytcfg["yt","__withintesting"] = "True"
         AnswerTestingTest.result_storage = \
@@ -84,19 +95,24 @@
         elif options.compare_name == "latest":
             options.compare_name = _latest
 
-        # We only either store or test.
+        # Local/Cloud storage 
         if options.store_local_results:
+            storage_class = AnswerTestLocalStorage
+            # Fix up filename for local storage 
             if options.compare_name is not None:
-                options.compare_name = "%s/%s" % \
+                options.compare_name = "%s/%s/%s" % \
                         (os.path.realpath(options.output_dir), 
-                         options.compare_name)
-            AnswerTestingTest.reference_storage = \
-                self.storage = \
-                    AnswerTestLocalStorage(options.compare_name, 
-                                           not options.store_results)
+                         options.compare_name, options.compare_name)
+            if options.this_name is not None:
+                options.this_name= "%s/%s/%s" % \
+                        (os.path.realpath(options.output_dir), 
+                         options.this_name, options.this_name)
         else:
-            AnswerTestingTest.reference_storage = \
-                self.storage = AnswerTestCloudStorage(options.compare_name, not options.store_results)
+            storage_class = AnswerTestCloudStorage
+
+        # Initialize answer/reference storage
+        AnswerTestingTest.reference_storage = self.storage = \
+                storage_class(options.compare_name, options.this_name)
 
         self.store_results = options.store_results
         self.store_local_results = options.store_local_results
@@ -108,10 +124,10 @@
         self.storage.dump(self.result_storage)        
 
 class AnswerTestStorage(object):
-    def __init__(self, reference_name, read=True):
+    def __init__(self, reference_name=None, answer_name=None):
         self.reference_name = reference_name
+        self.answer_name = answer_name
         self.cache = {}
-        self.read = read
     def dump(self, result_storage, result):
         raise NotImplementedError 
     def get(self, pf_name, default=None):
@@ -119,7 +135,7 @@
 
 class AnswerTestCloudStorage(AnswerTestStorage):
     def get(self, pf_name, default = None):
-        if not self.read: return default
+        if self.reference_name is None: return default
         if pf_name in self.cache: return self.cache[pf_name]
         url = _url_path % (self.reference_name, pf_name)
         try:
@@ -135,7 +151,7 @@
         return rv
 
     def dump(self, result_storage):
-        if self.read: return
+        if self.answer_name is None: return
         # This is where we dump our result storage up to Amazon, if we are able
         # to.
         import boto
@@ -144,18 +160,18 @@
         bucket = c.get_bucket("yt-answer-tests")
         for pf_name in result_storage:
             rs = cPickle.dumps(result_storage[pf_name])
-            tk = bucket.get_key("%s_%s" % (self.reference_name, pf_name)) 
+            tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name)) 
             if tk is not None: tk.delete()
             k = Key(bucket)
-            k.key = "%s_%s" % (self.reference_name, pf_name)
+            k.key = "%s_%s" % (self.answer_name, pf_name)
             k.set_contents_from_string(rs)
             k.set_acl("public-read")
 
 class AnswerTestLocalStorage(AnswerTestStorage):
     def dump(self, result_storage):
-        if self.read: return 
+        if self.answer_name is None: return
         # Store data using shelve
-        ds = shelve.open(self.reference_name, protocol=-1)
+        ds = shelve.open(self.answer_name, protocol=-1)
         for pf_name in result_storage:
             answer_name = "%s" % pf_name
             if name in ds:
@@ -164,7 +180,7 @@
         ds.close()
 
     def get(self, pf_name, default=None):
-        if not self.read: return default
+        if self.reference_name is None: return default
         # Read data using shelve
         answer_name = "%s" % pf_name
         ds = shelve.open(self.reference_name, protocol=-1)
@@ -224,8 +240,7 @@
 
     def __call__(self):
         nv = self.run()
-        if self.reference_storage.read and \
-           self.reference_storage.reference_name is not None:
+        if self.reference_storage.reference_name is not None:
             dd = self.reference_storage.get(self.storage_name)
             if dd is None: raise YTNoOldAnswer(self.storage_name)
             ov = dd[self.description]
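
The key semantic change in this refactor: instead of a single read flag, a storage object now carries two independent names -- reference_name (compare against; None disables comparison) and answer_name (store under; None disables storing) -- so a run can do both, either, or neither. A toy in-memory model of that contract:

    class MiniStorage(object):
        """Hypothetical in-memory model of the refactored storage contract."""
        _store = {}

        def __init__(self, reference_name=None, answer_name=None):
            self.reference_name = reference_name
            self.answer_name = answer_name

        def get(self, key, default=None):
            if self.reference_name is None:  # comparison disabled
                return default
            return self._store.get((self.reference_name, key), default)

        def dump(self, results):
            if self.answer_name is None:     # storing disabled
                return
            for key, value in results.items():
                self._store[(self.answer_name, key)] = value

    # Store under one name, compare under another:
    MiniStorage(answer_name="gold011").dump({"pf1": 42})
    assert MiniStorage(reference_name="gold011").get("pf1") == 42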



https://bitbucket.org/yt_analysis/yt-3.0/changeset/512ea98a0ad9/
changeset:   512ea98a0ad9
branch:      yt
user:        samskillman
date:        2012-11-14 19:15:45
summary:     Fixes if the output dir for the local answer storage doesn't exist.
affected #:  1 file

diff -r 8c04bce75238344c4c47282b81a4949b86326c01 -r 512ea98a0ad912131b90407670b0b1ded5abe3f9 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -101,12 +101,16 @@
             # Fix up filename for local storage 
             if options.compare_name is not None:
                 options.compare_name = "%s/%s/%s" % \
-                        (os.path.realpath(options.output_dir), 
-                         options.compare_name, options.compare_name)
+                    (options.output_dir, options.compare_name, 
+                     options.compare_name)
             if options.this_name is not None:
-                options.this_name= "%s/%s/%s" % \
-                        (os.path.realpath(options.output_dir), 
-                         options.this_name, options.this_name)
+                name_dir_path = "%s/%s" % \
+                    (os.path.realpath(options.output_dir), 
+                    options.this_name)
+                if not os.path.isdir(name_dir_path):
+                    os.mkdir(name_dir_path)
+                options.this_name= "%s/%s" % \
+                        (name_dir_path, options.this_name)
         else:
             storage_class = AnswerTestCloudStorage
 



https://bitbucket.org/yt_analysis/yt-3.0/changeset/849c77e32bd5/
changeset:   849c77e32bd5
branch:      yt
user:        samskillman
date:        2012-11-14 19:27:59
summary:     Merging in from main
affected #:  2 files

diff -r 512ea98a0ad912131b90407670b0b1ded5abe3f9 -r 849c77e32bd58c47a595006c7622d3b2d1f7b22b yt/frontends/enzo/answer_testing_support.py
--- a/yt/frontends/enzo/answer_testing_support.py
+++ b/yt/frontends/enzo/answer_testing_support.py
@@ -62,14 +62,14 @@
     for field in fields:
         yield GridValuesTest(pf_fn, field)
         if 'particle' in field: continue
-        for axis in [0, 1, 2]:
-            for ds in dso:
+        for ds in dso:
+            for axis in [0, 1, 2]:
                 for weight_field in [None, "Density"]:
                     yield ProjectionValuesTest(
                         pf_fn, axis, field, weight_field,
                         ds, decimals=3)
-                yield FieldValuesTest(
-                        pf_fn, field, ds, decimals=3)
+            yield FieldValuesTest(
+                    pf_fn, field, ds, decimals=3)
                     
 class ShockTubeTest(object):
     def __init__(self, data_file, solution_file, fields, 


diff -r 512ea98a0ad912131b90407670b0b1ded5abe3f9 -r 849c77e32bd58c47a595006c7622d3b2d1f7b22b yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -328,7 +328,7 @@
             assert_equal(new_result, old_result, 
                          err_msg=err_msg, verbose=True)
         else:
-            assert_rel_equal(new_result, old_result, self.decimals,
+            assert_allclose(new_result, old_result, 10.**(-self.decimals),
                              err_msg=err_msg, verbose=True)
 
 class AllFieldValuesTest(AnswerTestingTest):
@@ -389,8 +389,8 @@
                 assert_equal(new_result[k], old_result[k],
                              err_msg=err_msg)
             else:
-                assert_rel_equal(new_result[k], old_result[k], 
-                                 self.decimals, err_msg=err_msg)
+                assert_allclose(new_result[k], old_result[k], 
+                                 10.**-(self.decimals), err_msg=err_msg)
 
 class PixelizedProjectionValuesTest(AnswerTestingTest):
     _type_name = "PixelizedProjectionValues"
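
One subtlety in this merge: `assert_rel_equal` is swapped for NumPy's `assert_allclose`, with the decimal count converted into a relative tolerance. A quick illustration of the equivalence being assumed:

{{{
#!python

from numpy.testing import assert_allclose

# assert_allclose checks |actual - desired| <= rtol * |desired|, so
# rtol = 10**-decimals means roughly "agreement to `decimals` decimal
# places", measured relative to the reference value.
decimals = 3
assert_allclose(1.0005, 1.0, rtol=10.**(-decimals))   # passes (off by 5e-4)

}}}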



https://bitbucket.org/yt_analysis/yt-3.0/changeset/f5cf9655b568/
changeset:   f5cf9655b568
branch:      yt
user:        samskillman
date:        2012-11-14 21:49:24
summary:     Fixing my_version property after suggestions from @matthewturk
affected #:  1 file

diff -r 849c77e32bd58c47a595006c7622d3b2d1f7b22b -r f5cf9655b5689bffac61d2d5c5c3c531917106e2 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -68,16 +68,15 @@
 
     @property
     def my_version(self, version=None):
-        if self._my_version is None:
-            if version is not None:
-                self._my_version = version
-            else:
-                try:
-                    self._my_version = get_yt_version()
-                except:
-                    self._my_version = "UNKNOWN%s" % (time.time())
-        else:
+        if self._my_version is not None:
             return self._my_version
+        if version is None:
+            try:
+                version = get_yt_version()
+            except:
+                version = "UNKNOWN%s" % (time.time())
+        self._my_version = version
+        return self._my_version
 
     def configure(self, options, conf):
         super(AnswerTesting, self).configure(options, conf)
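
The rewritten property is the usual cache-on-first-access idiom: return the cached value if present, otherwise compute it once and store it. Reduced to a sketch below; since a property getter can never receive the `version` keyword through attribute access, the sketch drops it. The `get_yt_version` import is an assumption (the framework has it in scope).

{{{
#!python

import time
from yt.funcs import get_yt_version   # assumed import path

class VersionedThing(object):
    _my_version = None

    @property
    def my_version(self):
        if self._my_version is not None:
            return self._my_version          # cached by a previous access
        try:
            version = get_yt_version()
        except Exception:
            version = "UNKNOWN%s" % time.time()  # unique fallback tag
        self._my_version = version
        return self._my_version

}}}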



https://bitbucket.org/yt_analysis/yt-3.0/changeset/9fafae943a69/
changeset:   9fafae943a69
branch:      yt
user:        MatthewTurk
date:        2012-11-14 22:00:38
summary:     Merged in samskillman/yt (pull request #340)
affected #:  1 file

diff -r 917e70c630b36db1f9a821023a3a26b4c9454ac9 -r 9fafae943a699a5e2740699f610048c57eea7b83 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -49,6 +49,7 @@
 
 class AnswerTesting(Plugin):
     name = "answer-testing"
+    _my_version = None
 
     def options(self, parser, env=os.environ):
         super(AnswerTesting, self).options(parser, env=env)
@@ -65,16 +66,25 @@
         parser.add_option("--local-store", dest="store_local_results",
             default=False, action="store_true", help="Store/Load local results?")
 
+    @property
+    def my_version(self, version=None):
+        if self._my_version is not None:
+            return self._my_version
+        if version is None:
+            try:
+                version = get_yt_version()
+            except:
+                version = "UNKNOWN%s" % (time.time())
+        self._my_version = version
+        return self._my_version
+
     def configure(self, options, conf):
         super(AnswerTesting, self).configure(options, conf)
         if not self.enabled:
             return
         disable_stream_logging()
-        try:
-            my_hash = get_yt_version()
-        except:
-            my_hash = "UNKNOWN%s" % (time.time())
-        if options.this_name is None: options.this_name = my_hash
+        if options.this_name is None: 
+            options.this_name = self.my_version
         from yt.config import ytcfg
         ytcfg["yt","__withintesting"] = "True"
         AnswerTestingTest.result_storage = \
@@ -84,19 +94,28 @@
         elif options.compare_name == "latest":
             options.compare_name = _latest
 
-        # We only either store or test.
+        # Local/Cloud storage 
         if options.store_local_results:
+            storage_class = AnswerTestLocalStorage
+            # Fix up filename for local storage 
             if options.compare_name is not None:
-                options.compare_name = "%s/%s" % \
-                        (os.path.realpath(options.output_dir), 
-                         options.compare_name)
-            AnswerTestingTest.reference_storage = \
-                self.storage = \
-                    AnswerTestLocalStorage(options.compare_name, 
-                                           not options.store_results)
+                options.compare_name = "%s/%s/%s" % \
+                    (options.output_dir, options.compare_name, 
+                     options.compare_name)
+            if options.this_name is not None:
+                name_dir_path = "%s/%s" % \
+                    (os.path.realpath(options.output_dir), 
+                    options.this_name)
+                if not os.path.isdir(name_dir_path):
+                    os.mkdir(name_dir_path)
+                options.this_name= "%s/%s" % \
+                        (name_dir_path, options.this_name)
         else:
-            AnswerTestingTest.reference_storage = \
-                self.storage = AnswerTestCloudStorage(options.compare_name, not options.store_results)
+            storage_class = AnswerTestCloudStorage
+
+        # Initialize answer/reference storage
+        AnswerTestingTest.reference_storage = self.storage = \
+                storage_class(options.compare_name, options.this_name)
 
         self.store_results = options.store_results
         self.store_local_results = options.store_local_results
@@ -108,10 +127,10 @@
         self.storage.dump(self.result_storage)        
 
 class AnswerTestStorage(object):
-    def __init__(self, reference_name, read=True):
+    def __init__(self, reference_name=None, answer_name=None):
         self.reference_name = reference_name
+        self.answer_name = answer_name
         self.cache = {}
-        self.read = read
     def dump(self, result_storage, result):
         raise NotImplementedError 
     def get(self, pf_name, default=None):
@@ -119,7 +138,7 @@
 
 class AnswerTestCloudStorage(AnswerTestStorage):
     def get(self, pf_name, default = None):
-        if not self.read: return default
+        if self.reference_name is None: return default
         if pf_name in self.cache: return self.cache[pf_name]
         url = _url_path % (self.reference_name, pf_name)
         try:
@@ -135,7 +154,7 @@
         return rv
 
     def dump(self, result_storage):
-        if self.read: return
+        if self.answer_name is None: return
         # This is where we dump our result storage up to Amazon, if we are able
         # to.
         import boto
@@ -144,18 +163,18 @@
         bucket = c.get_bucket("yt-answer-tests")
         for pf_name in result_storage:
             rs = cPickle.dumps(result_storage[pf_name])
-            tk = bucket.get_key("%s_%s" % (self.reference_name, pf_name)) 
+            tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name)) 
             if tk is not None: tk.delete()
             k = Key(bucket)
-            k.key = "%s_%s" % (self.reference_name, pf_name)
+            k.key = "%s_%s" % (self.answer_name, pf_name)
             k.set_contents_from_string(rs)
             k.set_acl("public-read")
 
 class AnswerTestLocalStorage(AnswerTestStorage):
     def dump(self, result_storage):
-        if self.read: return 
+        if self.answer_name is None: return
         # Store data using shelve
-        ds = shelve.open(self.reference_name, protocol=-1)
+        ds = shelve.open(self.answer_name, protocol=-1)
         for pf_name in result_storage:
             answer_name = "%s" % pf_name
             if name in ds:
@@ -164,7 +183,7 @@
         ds.close()
 
     def get(self, pf_name, default=None):
-        if not self.read: return default
+        if self.reference_name is None: return default
         # Read data using shelve
         answer_name = "%s" % pf_name
         ds = shelve.open(self.reference_name, protocol=-1)
@@ -224,8 +243,7 @@
 
     def __call__(self):
         nv = self.run()
-        if self.reference_storage.read and \
-           self.reference_storage.reference_name is not None:
+        if self.reference_storage.reference_name is not None:
             dd = self.reference_storage.get(self.storage_name)
             if dd is None: raise YTNoOldAnswer(self.storage_name)
             ov = dd[self.description]



https://bitbucket.org/yt_analysis/yt-3.0/changeset/547be7c09542/
changeset:   547be7c09542
branch:      yt
user:        jzuhone
date:        2012-11-14 21:59:48
summary:     Adding support for standalone particle files in FLASH, provided that they have a corresponding plotfile or checkpoint file with a matching grid structure (in other words, both files were written at the same simulation time).

One would access particle data in this fashion:

{{{
#!python

pf = load("radio_halo_1kpc_hdf5_plt_cnt_0120", particle_filename="radio_halo_1kpc_hdf5_part_0120")

}}}

The particle data may then be accessed as if it were part of the main file.
affected #:  2 files

diff -r 917e70c630b36db1f9a821023a3a26b4c9454ac9 -r 547be7c0954206d1e7f969eba4547517d7156180 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -69,7 +69,8 @@
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.directory = os.path.dirname(self.hierarchy_filename)
         self._handle = pf._handle
-
+        self._particle_handle = pf._particle_handle
+        
         self.float_type = np.float64
         AMRHierarchy.__init__(self,pf,data_style)
 
@@ -79,9 +80,9 @@
     def _detect_fields(self):
         ncomp = self._handle["/unknown names"].shape[0]
         self.field_list = [s for s in self._handle["/unknown names"][:].flat]
-        if ("/particle names" in self._handle) :
+        if ("/particle names" in self._particle_handle) :
             self.field_list += ["particle_" + s[0].strip() for s
-                                in self._handle["/particle names"][:]]
+                                in self._particle_handle["/particle names"][:]]
     
     def _setup_classes(self):
         dd = self._get_data_reader_dict()
@@ -98,6 +99,7 @@
     def _parse_hierarchy(self):
         f = self._handle # shortcut
         pf = self.parameter_file # shortcut
+        f_part = self._particle_handle # shortcut
         
         # Initialize to the domain left / domain right
         ND = self.parameter_file.dimensionality
@@ -120,7 +122,7 @@
                               for ax in 'xyz']
         self.grid_dimensions[:] *= (nxb, nyb, nzb)
         try:
-            self.grid_particle_count[:] = f["/localnp"][:][:,None]
+            self.grid_particle_count[:] = f_part["/localnp"][:][:,None]
         except KeyError:
             self.grid_particle_count[:] = 0.0
         self._particle_indices = np.zeros(self.num_grids + 1, dtype='int64')
@@ -209,6 +211,7 @@
     
     def __init__(self, filename, data_style='flash_hdf5',
                  storage_filename = None,
+                 particle_filename = None, 
                  conversion_override = None):
 
         if self._handle is not None: return
@@ -216,6 +219,16 @@
         if conversion_override is None: conversion_override = {}
         self._conversion_override = conversion_override
 
+        self.particle_filename = particle_filename
+
+        if self.particle_filename is None :
+            self._particle_handle = self._handle
+        else :
+            try :
+                self._particle_handle = h5py.File(self.particle_filename, "r")
+            except :
+                raise IOError(self.particle_filename)
+                                                                
         StaticOutput.__init__(self, filename, data_style)
         self.storage_filename = storage_filename
 


diff -r 917e70c630b36db1f9a821023a3a26b4c9454ac9 -r 547be7c0954206d1e7f969eba4547517d7156180 yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -39,9 +39,11 @@
         # Now we cache the particle fields
         self.pf = pf
         self._handle = pf._handle
+        self._particle_handle = pf._particle_handle
+        
         try :
             particle_fields = [s[0].strip() for s in
-                               self._handle["/particle names"][:]]
+                               self._particle_handle["/particle names"][:]]
             self._particle_fields = dict([("particle_" + s, i) for i, s in
                                           enumerate(particle_fields)])
         except KeyError:
@@ -53,12 +55,13 @@
 
     def _read_data_set(self, grid, field):
         f = self._handle
+        f_part = self._particle_handle
         if field in self._particle_fields:
             if grid.NumberOfParticles == 0: return np.array([], dtype='float64')
             start = self.pf.h._particle_indices[grid.id - grid._id_offset]
             end = self.pf.h._particle_indices[grid.id - grid._id_offset + 1]
             fi = self._particle_fields[field]
-            tr = f["/tracer particles"][start:end, fi]
+            tr = f_part["/tracer particles"][start:end, fi]
         else:
             tr = f["/%s" % field][grid.id - grid._id_offset,:,:,:].transpose()
         return tr.astype("float64")
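
Extending the usage example from the commit message: once loaded with `particle_filename`, particle fields read transparently from the particle file while grid fields continue to come from the plot file. A hedged sketch; the field name is illustrative, since the available particle fields come from `/particle names` in the particle file.

{{{
#!python

from yt.mods import load

pf = load("radio_halo_1kpc_hdf5_plt_cnt_0120",
          particle_filename="radio_halo_1kpc_hdf5_part_0120")
dd = pf.h.all_data()
print dd["particle_posx"]   # illustrative particle field
print dd["Density"]         # grid fields still come from the plot file

}}}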



https://bitbucket.org/yt_analysis/yt-3.0/changeset/64ab22a8de33/
changeset:   64ab22a8de33
branch:      yt
user:        MatthewTurk
date:        2012-11-14 22:18:44
summary:     Merged in jzuhone/yt (pull request #342)
affected #:  2 files

diff -r 9fafae943a699a5e2740699f610048c57eea7b83 -r 64ab22a8de3385fbee60afd6927b5b25ad7541e4 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -69,7 +69,8 @@
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.directory = os.path.dirname(self.hierarchy_filename)
         self._handle = pf._handle
-
+        self._particle_handle = pf._particle_handle
+        
         self.float_type = np.float64
         AMRHierarchy.__init__(self,pf,data_style)
 
@@ -79,9 +80,9 @@
     def _detect_fields(self):
         ncomp = self._handle["/unknown names"].shape[0]
         self.field_list = [s for s in self._handle["/unknown names"][:].flat]
-        if ("/particle names" in self._handle) :
+        if ("/particle names" in self._particle_handle) :
             self.field_list += ["particle_" + s[0].strip() for s
-                                in self._handle["/particle names"][:]]
+                                in self._particle_handle["/particle names"][:]]
     
     def _setup_classes(self):
         dd = self._get_data_reader_dict()
@@ -98,6 +99,7 @@
     def _parse_hierarchy(self):
         f = self._handle # shortcut
         pf = self.parameter_file # shortcut
+        f_part = self._particle_handle # shortcut
         
         # Initialize to the domain left / domain right
         ND = self.parameter_file.dimensionality
@@ -120,7 +122,7 @@
                               for ax in 'xyz']
         self.grid_dimensions[:] *= (nxb, nyb, nzb)
         try:
-            self.grid_particle_count[:] = f["/localnp"][:][:,None]
+            self.grid_particle_count[:] = f_part["/localnp"][:][:,None]
         except KeyError:
             self.grid_particle_count[:] = 0.0
         self._particle_indices = np.zeros(self.num_grids + 1, dtype='int64')
@@ -209,6 +211,7 @@
     
     def __init__(self, filename, data_style='flash_hdf5',
                  storage_filename = None,
+                 particle_filename = None, 
                  conversion_override = None):
 
         if self._handle is not None: return
@@ -216,6 +219,16 @@
         if conversion_override is None: conversion_override = {}
         self._conversion_override = conversion_override
 
+        self.particle_filename = particle_filename
+
+        if self.particle_filename is None :
+            self._particle_handle = self._handle
+        else :
+            try :
+                self._particle_handle = h5py.File(self.particle_filename, "r")
+            except :
+                raise IOError(self.particle_filename)
+                                                                
         StaticOutput.__init__(self, filename, data_style)
         self.storage_filename = storage_filename
 


diff -r 9fafae943a699a5e2740699f610048c57eea7b83 -r 64ab22a8de3385fbee60afd6927b5b25ad7541e4 yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -39,9 +39,11 @@
         # Now we cache the particle fields
         self.pf = pf
         self._handle = pf._handle
+        self._particle_handle = pf._particle_handle
+        
         try :
             particle_fields = [s[0].strip() for s in
-                               self._handle["/particle names"][:]]
+                               self._particle_handle["/particle names"][:]]
             self._particle_fields = dict([("particle_" + s, i) for i, s in
                                           enumerate(particle_fields)])
         except KeyError:
@@ -53,12 +55,13 @@
 
     def _read_data_set(self, grid, field):
         f = self._handle
+        f_part = self._particle_handle
         if field in self._particle_fields:
             if grid.NumberOfParticles == 0: return np.array([], dtype='float64')
             start = self.pf.h._particle_indices[grid.id - grid._id_offset]
             end = self.pf.h._particle_indices[grid.id - grid._id_offset + 1]
             fi = self._particle_fields[field]
-            tr = f["/tracer particles"][start:end, fi]
+            tr = f_part["/tracer particles"][start:end, fi]
         else:
             tr = f["/%s" % field][grid.id - grid._id_offset,:,:,:].transpose()
         return tr.astype("float64")



https://bitbucket.org/yt_analysis/yt-3.0/changeset/f445fab0106d/
changeset:   f445fab0106d
branch:      yt
user:        ngoldbaum
date:        2012-11-14 00:43:43
summary:     plot window plots now use the unit name supplied via the width keyword if width is a tuple.  Closes #463
Also streamlining, simplifying, and refactoring the way window plots are set up.
affected #:  1 file

diff -r 5e1ba28cc582d1f7dc9de6f0c73695722d1f32bd -r f445fab0106d61d158df0f68fa97929c5ff990c4 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -146,53 +146,33 @@
 log_transform = FieldTransform('log10', np.log10, LogLocator())
 linear_transform = FieldTransform('linear', lambda x: x, LinearLocator())
 
-def GetBoundsAndCenter(axis, center, width, pf, unit='1'):
+def StandardWidth(axis, width, depth, pf):
     if width == None:
-        width = (pf.domain_width[x_dict[axis]],
-                 pf.domain_width[y_dict[axis]])
+        # Default to code units
+        if not iterable(axis):
+            width = ((pf.domain_width[x_dict[axis]], '1'),
+                     (pf.domain_width[y_dict[axis]], '1'))
+        else:
+            width = ((pf.domain_width.min(), '1'),
+                     (pf.domain_width.min(), '1'))
     elif iterable(width): 
-        if isinstance(width[1],str):
-            w,unit = width
-            width = (w, w)
-        elif isinstance(width[1],tuple):
-            wx,unitx = width[0]
-            wy,unity = width[1]
-            width = (wx/pf[unitx],wy/pf[unity])
+        if isinstance(width[1], str):
+            width = (width, width)
+        elif isinstance(width[1], tuple):
+            pass
     else:
-        width = (width, width)
-    Wx, Wy = width
-    width = (Wx/pf[unit], Wy/pf[unit])
-    if isinstance(center,str):
-        if center.lower() == 'm' or center.lower() == 'max':
-            v, center = pf.h.find_max("Density")
-        elif center.lower() == "center" or center.lower() == "c":
-            center = (pf.domain_right_edge + pf.domain_left_edge)/2.0
-        else:
-            raise RuntimeError('center keyword \"%s\" not recognized'%center)
-    bounds = [center[x_dict[axis]]-width[0]/2,
-              center[x_dict[axis]]+width[0]/2,
-              center[y_dict[axis]]-width[1]/2,
-              center[y_dict[axis]]+width[1]/2]
-    return (bounds,center)
-
-def GetOffAxisBoundsAndCenter(normal, center, width, pf, unit='1',depth=None):
-    if width == None:
-        width = (pf.domain_width.min(),
-                 pf.domain_width.min())
-    elif iterable(width) and isinstance(width[1],str):
-        w,unit = width
-        width = w
-    if not iterable(width):
-        width = (width, width)
-    Wx, Wy = width
-    width = np.array((Wx/pf[unit], Wy/pf[unit]))
+        width = ((width, '1'), (width, '1'))
     if depth != None:
-        if iterable(depth) and isinstance(depth[1],str):
-            d,unit = depth
-            depth = d/pf[unit]
+        if iterable(depth) and isinstance(depth[1], str):
+            depth = (depth,)
         elif iterable(depth):
             raise RuntimeError("Depth must be a float or a (width,\"unit\") tuple")
-        width = np.append(width,depth)
+        else:
+            depth = ((depth, '1'),)
+        width += depth
+    return width
+
+def StandardCenter(center, pf):
     if isinstance(center,str):
         if center.lower() == 'm' or center.lower() == 'max':
             v, center = pf.h.find_max("Density")
@@ -200,21 +180,40 @@
             center = (pf.domain_left_edge + pf.domain_right_edge) / 2
         else:
             raise RuntimeError('center keyword \"%s\" not recognized'%center)
+    return center
 
-    if width.shape == (2,):
+
+def GetWindowParameters(axis, center, width, pf):
+    width = StandardWidth(axis, width, None, pf)
+    center = StandardCenter(center, pf)
+    units = (width[0][1], width[1][1])
+    bounds = (center[x_dict[axis]]-width[0][0]/pf[units[0]]/2,  
+              center[x_dict[axis]]+width[0][0]/pf[units[0]]/2, 
+              center[y_dict[axis]]-width[1][0]/pf[units[1]]/2, 
+              center[y_dict[axis]]+width[1][0]/pf[units[1]]/2)
+    return (bounds, center, units)
+
+def GetObliqueWindowParameters(normal, center, width, pf, depth=None):
+    width = StandardWidth(normal, width, depth, pf)
+    center = StandardCenter(center, pf)
+
+    if len(width) == 2:
         # Transforming to the cutting plane coordinate system
         center = np.array(center)
         center = (center - pf.domain_left_edge)/pf.domain_width - 0.5
         (normal,perp1,perp2) = ortho_find(normal)
         mat = np.transpose(np.column_stack((perp1,perp2,normal)))
         center = np.dot(mat,center)
-        width = width
     
-        bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2]
+        units = [width[0][1], width[1][1]]
+        bounds = [-width[0][0]/pf[units[0]]/2, width[0][0]/pf[units[0]]/2, 
+                  -width[1][0]/pf[units[1]]/2, width[1][0]/pf[units[1]]/2]
     else:
-        bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2, -width[2]/2, width[2]/2]
-
-    return (bounds,center)
+        units = [width[0][1], width[1][1], width[2][1]]
+        bounds = [-width[0][0]/pf[units[0]]/2, width[0][0]/pf[units[0]]/2, 
+                  -width[1][0]/pf[units[1]]/2, width[1][0]/pf[units[1]]/2, 
+                  -width[2][0]/pf[units[2]]/2, width[2][0]/pf[units[2]]/2]
+    return (bounds, center, units)
 
 class PlotWindow(object):
     _plot_valid = False
@@ -264,7 +263,7 @@
         self.oblique = oblique
         self.data_source = data_source
         self.buff_size = buff_size
-        self.antialias = True
+        self.antialias = antialias
         self.set_window(bounds) # this automatically updates the data and plot
         self.origin = origin
         self.fontsize = fontsize
@@ -527,7 +526,7 @@
     def __init__(self, *args,**kwargs):
         setup = kwargs.pop("setup", True)
         PlotWindow.__init__(self, *args,**kwargs)
-        self._unit = None
+        self._axes_unit_names = None
         self._callbacks = []
         self._field_transform = {}
         self._colormaps = defaultdict(lambda: 'algae')
@@ -654,12 +653,14 @@
 
         Parameters
         ----------
-        unit_name : string
+        unit_name : string or two element tuple of strings
             A unit, available for conversion in the parameter file, that the
             image extents will be displayed in.  If set to None, any previous
             units will be reset.  If the unit is None, the default is chosen.
             If unit_name is '1', 'u', or 'unitary', it will not display the 
-            units, and only show the axes name.
+            units, and only show the axes name. If unit_name is a tuple, the first
+            element is assumed to be the unit for the x axis and the second element
+            the unit for the y axis.
 
         Raises
         ------
@@ -677,12 +678,13 @@
         >>> p.show()
         """
         # blind except because it could be in conversion_factors or units
-        try:
-            self.pf[unit_name]
-        except KeyError: 
-            if unit_name is not None:
-                raise YTUnitNotRecognized(unit_name)
-        self._unit = unit_name
+        if unit_name is not None:
+            for un in unit_name:
+                try:
+                    self.pf[un]
+                except KeyError: 
+                    raise YTUnitNotRecognized(un)
+        self._axes_unit_names = unit_name
 
     def get_metadata(self, field, strip_mathml = True, return_string = True):
         fval = self._frb[field]
@@ -690,10 +692,11 @@
         ma = fval.max()
         x_width = self.xlim[1] - self.xlim[0]
         y_width = self.ylim[1] - self.ylim[0]
-        if self._unit is None:
+        if self._axes_unit_names is None:
             unit = get_smallest_appropriate_unit(x_width, self.pf)
+            unit = (unit, unit)
         else:
-            unit = self._unit
+            unit = self._axes_unit_names
         units = self.get_field_units(field, strip_mathml)
         center = getattr(self._frb.data_source, "center", None)
         if center is None or self._frb.axis == 4:
@@ -704,19 +707,20 @@
             center[y_dict[self._frb.axis]] = 0.5 * (
                 self.ylim[0] + self.ylim[1])
             xc, yc, zc = center
+        pdb.set_trace()
         if return_string:
             md = _metadata_template % dict(
                 pf = self.pf,
-                x_width = x_width*self.pf[unit],
-                y_width = y_width*self.pf[unit],
-                unit = unit, units = units, mi = mi, ma = ma,
-                xc = xc, yc = yc, zc = zc)
+                x_width = x_width*self.pf[unit[0]],
+                y_width = y_width*self.pf[unit[1]],
+                axes_unit_names = unit[0], colorbar_unit = units, 
+                mi = mi, ma = ma, xc = xc, yc = yc, zc = zc)
         else:
             md = dict(pf = self.pf,
-                      x_width = x_width*self.pf[unit],
-                      y_width = y_width*self.pf[unit],
-                      unit = unit, units = units, mi = mi, ma = ma,
-                      xc = xc, yc = yc, zc = zc)
+                      x_width = x_width*self.pf[unit[0]],
+                      y_width = y_width*self.pf[unit[1]],
+                      axes_unit_names = unit, colorbar_unit = units, 
+                      mi = mi, ma = ma, xc = xc, yc = yc, zc = zc)
         return md
 
     def get_field_units(self, field, strip_mathml = True):
@@ -776,9 +780,17 @@
                 raise RuntimeError(
                     'origin keyword: \"%(k)s\" not recognized' % {'k': self.origin})
 
-            extent = [self.xlim[i] - xc for i in (0,1)]
-            extent.extend([self.ylim[i] - yc for i in (0,1)])
-            extent = [el*self.pf[md['unit']] for el in extent]
+            if (md['axes_unit_names'][0] != md['axes_unit_names'][1]):
+                raise RuntimeError(
+                    'Axis unit labels for the x and y axis must be the same. \n'
+                    'x axis label: \"%(x)s\" \n'
+                    'y axis label: \"%(y)s\" \n' 
+                    % {'x' : md['axes_unit_names'][0],
+                       'y' : md['axes_unit_names'][1]})
+
+            extentx = [(self.xlim[i] - xc)*self.pf[md['axes_unit_names'][0]] for i in (0,1)]
+            extenty = [(self.ylim[i] - yc)*self.pf[md['axes_unit_names'][1]] for i in (0,1)]
+            extent = extentx + extenty
 
             if f in self.plots.keys():
                 zlim = (self.plots[f].zmin,self.plots[f].zmax)
@@ -798,20 +810,21 @@
 
             self.plots[f] = WindowPlotMPL(self._frb[f], extent, self._field_transform[f], 
                                           self._colormaps[f], size, zlim)
+
             self.plots[f].cb = self.plots[f].figure.colorbar(
                 self.plots[f].image, cax = self.plots[f].cax)
 
-            if not md['unit'] in ['1', 'u', 'unitary']:
-                axes_unit_label = '\/\/('+md['unit']+')'
-            else:
-                axes_unit_label = ''
-
+            axes_unit_labels = ['', '']
+            for i, un in enumerate(md['axes_unit_names']):
+                if un not in ['1', 'u', 'unitary']:
+                    axes_unit_labels[i] = '\/\/('+un+')'
+                    
             if self.oblique == False:
                 labels = [r'$\rm{'+axis_labels[axis_index][i]+
-                        axes_unit_label + r'}$' for i in (0,1)]
+                        axes_unit_labels[i] + r'}$' for i in (0,1)]
             else:
-                labels = [r'$\rm{Image\/x'+axes_unit_label+'}$',
-                          r'$\rm{Image\/y'+axes_unit_label+'}$']
+                labels = [r'$\rm{Image\/x'+axes_unit_labels[0]+'}$',
+                          r'$\rm{Image\/y'+axes_unit_labels[1]+'}$']
 
             self.plots[f].axes.set_xlabel(labels[0],fontsize=self.fontsize)
             self.plots[f].axes.set_ylabel(labels[1],fontsize=self.fontsize)
@@ -831,14 +844,14 @@
             except ParseFatalException, err:
                 raise YTCannotParseFieldDisplayName(f,field_name,str(err))
 
-            if md['units'] == None or md['units'] == '':
+            if md['colorbar_unit'] == None or md['colorbar_unit'] == '':
                 label = field_name
             else:
                 try:
-                    parser.parse(r'$'+md['units']+r'$')
+                    parser.parse(r'$'+md['colorbar_unit']+r'$')
                 except ParseFatalException, err:
-                    raise YTCannotParseUnitDisplayName(f, md['units'],str(err))
-                label = field_name+r'$\/\/('+md['units']+r')$'
+                    raise YTCannotParseUnitDisplayName(f, md['colorbar_unit'],str(err))
+                label = field_name+r'$\/\/('+md['colorbar_unit']+r')$'
 
             self.plots[f].cb.set_label(label,fontsize=self.fontsize)
 
@@ -1038,12 +1051,14 @@
         >>> p.save('sliceplot')
         
         """
-        # tHis will handle time series data and controllers
+        # this will handle time series data and controllers
         ts = self._initialize_dataset(pf) 
         self.ts = ts
         pf = self.pf = ts[0]
         axis = fix_axis(axis)
-        (bounds, center) = GetBoundsAndCenter(axis, center, width, pf)
+        (bounds, center, units) = GetWindowParameters(axis, center, width, pf)
+        if axes_unit is None and units != ('1', '1'):
+            axes_unit = units
         slc = pf.h.slice(axis, center[axis], fields=fields)
         PWViewerMPL.__init__(self, slc, bounds, origin=origin)
         self.set_axes_unit(axes_unit)
@@ -1130,7 +1145,9 @@
         self.ts = ts
         pf = self.pf = ts[0]
         axis = fix_axis(axis)
-        (bounds, center) = GetBoundsAndCenter(axis, center, width, pf)
+        (bounds, center, units) = GetWindowParameters(axis, center, width, pf)
+        if axes_unit is None  and units != ('1', '1'):
+            axes_unit = units
         proj = pf.h.proj(axis,fields,weight_field=weight_field,max_level=max_level,center=center)
         PWViewerMPL.__init__(self,proj,bounds,origin=origin)
         self.set_axes_unit(axes_unit)
@@ -1180,8 +1197,10 @@
         fontsize : integer
              The size of the fonts for the axis, colorbar, and tick labels.
         """
-        (bounds,center_rot) = GetOffAxisBoundsAndCenter(normal,center,width,pf)
-        cutting = pf.h.cutting(normal,center,fields=fields,north_vector=north_vector)
+        (bounds, center_rot, units) = GetObliqueWindowParameters(normal,center,width,pf)
+        if axes_unit is None and units != ('1', '1'):
+            axes_unit = units
+        cutting = pf.h.cutting(normal, center, fields=fields, north_vector=north_vector)
         # Hard-coding the origin keyword since the other two options
         # aren't well-defined for off-axis data objects
         PWViewerMPL.__init__(self,cutting,bounds,origin='center-window',periodic=False,oblique=True)
@@ -1264,8 +1283,9 @@
             set, an arbitrary grid-aligned north-vector is chosen.
 
         """
-        (bounds,center_rot) = GetOffAxisBoundsAndCenter(normal,center,width,pf,depth=depth)
-        # Hard-coding the resolution for now
+        (bounds, center_rot, units) = GetObliqueWindowParameters(normal,center,width,pf,depth=depth)
+        if axes_unit is None and units != ('1', '1'):
+            axes_unit = units[:2]
         fields = ensure_list(fields)[:]
         width = np.array((bounds[1] - bounds[0], bounds[3] - bounds[2], bounds[5] - bounds[4]))
         OffAxisProj = OffAxisProjectionDummyDataSource(center_rot, pf, normal, width, fields, interpolated,
@@ -1279,9 +1299,9 @@
 _metadata_template = """
 %(pf)s<br><br>
-Field of View:  %(x_width)0.3f %(unit)s<br>
-Minimum Value:  %(mi)0.3e %(units)s<br>
-Maximum Value:  %(ma)0.3e %(units)s<br>
+Field of View:  %(x_width)0.3f %(axes_unit_names)s<br>
+Minimum Value:  %(mi)0.3e %(colorbar_unit)s<br>
+Maximum Value:  %(ma)0.3e %(colorbar_unit)s<br>
 Central Point:  (data coords)<br>
    %(xc)0.14f<br>
    %(yc)0.14f<br>
@@ -1549,7 +1569,7 @@
             norm = matplotlib.colors.LogNorm()
         elif (field_transform.name == 'linear'):
             norm = matplotlib.colors.Normalize()
-        self.image = self.axes.imshow(data, origin='lower', extent = extent,
-                                      norm = norm, vmin = self.zmin, 
-                                      vmax = self.zmax, cmap = cmap)
+        self.image = self.axes.imshow(data, origin='lower', extent=extent,
+                                      norm=norm, vmin=self.zmin, 
+                                      vmax=self.zmax, cmap=cmap)
         self.image.axes.ticklabel_format(scilimits=(-4,3))
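
What the new behavior looks like from the user side, sketched with SlicePlot (the dataset path is illustrative, and 'kpc' must be a unit known to the parameter file):

{{{
#!python

from yt.mods import load, SlicePlot

pf = load("galaxy0030/galaxy0030")                    # illustrative dataset
# With a (value, 'unit') width, the axes are now labeled in that unit:
p = SlicePlot(pf, 'x', "Density", width=(10, 'kpc'))
# Per-axis widths also work, provided both use the same unit label:
p = SlicePlot(pf, 'x', "Density", width=((10, 'kpc'), (15, 'kpc')))
p.save()

}}}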



https://bitbucket.org/yt_analysis/yt-3.0/changeset/280b561398ad/
changeset:   280b561398ad
branch:      yt
user:        ngoldbaum
date:        2012-11-14 22:55:13
summary:     Merging.
affected #:  1 file

diff -r 64ab22a8de3385fbee60afd6927b5b25ad7541e4 -r 280b561398adf16b96f27246eb8dd462ab3a2344 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -146,53 +146,33 @@
 log_transform = FieldTransform('log10', np.log10, LogLocator())
 linear_transform = FieldTransform('linear', lambda x: x, LinearLocator())
 
-def GetBoundsAndCenter(axis, center, width, pf, unit='1'):
+def StandardWidth(axis, width, depth, pf):
     if width == None:
-        width = (pf.domain_width[x_dict[axis]],
-                 pf.domain_width[y_dict[axis]])
+        # Default to code units
+        if not iterable(axis):
+            width = ((pf.domain_width[x_dict[axis]], '1'),
+                     (pf.domain_width[y_dict[axis]], '1'))
+        else:
+            width = ((pf.domain_width.min(), '1'),
+                     (pf.domain_width.min(), '1'))
     elif iterable(width): 
-        if isinstance(width[1],str):
-            w,unit = width
-            width = (w, w)
-        elif isinstance(width[1],tuple):
-            wx,unitx = width[0]
-            wy,unity = width[1]
-            width = (wx/pf[unitx],wy/pf[unity])
+        if isinstance(width[1], str):
+            width = (width, width)
+        elif isinstance(width[1], tuple):
+            pass
     else:
-        width = (width, width)
-    Wx, Wy = width
-    width = (Wx/pf[unit], Wy/pf[unit])
-    if isinstance(center,str):
-        if center.lower() == 'm' or center.lower() == 'max':
-            v, center = pf.h.find_max("Density")
-        elif center.lower() == "center" or center.lower() == "c":
-            center = (pf.domain_right_edge + pf.domain_left_edge)/2.0
-        else:
-            raise RuntimeError('center keyword \"%s\" not recognized'%center)
-    bounds = [center[x_dict[axis]]-width[0]/2,
-              center[x_dict[axis]]+width[0]/2,
-              center[y_dict[axis]]-width[1]/2,
-              center[y_dict[axis]]+width[1]/2]
-    return (bounds,center)
-
-def GetOffAxisBoundsAndCenter(normal, center, width, pf, unit='1',depth=None):
-    if width == None:
-        width = (pf.domain_width.min(),
-                 pf.domain_width.min())
-    elif iterable(width) and isinstance(width[1],str):
-        w,unit = width
-        width = w
-    if not iterable(width):
-        width = (width, width)
-    Wx, Wy = width
-    width = np.array((Wx/pf[unit], Wy/pf[unit]))
+        width = ((width, '1'), (width, '1'))
     if depth != None:
-        if iterable(depth) and isinstance(depth[1],str):
-            d,unit = depth
-            depth = d/pf[unit]
+        if iterable(depth) and isinstance(depth[1], str):
+            depth = (depth,)
         elif iterable(depth):
             raise RuntimeError("Depth must be a float or a (width,\"unit\") tuple")
-        width = np.append(width,depth)
+        else:
+            depth = ((depth, '1'),)
+        width += depth
+    return width
+
+def StandardCenter(center, pf):
     if isinstance(center,str):
         if center.lower() == 'm' or center.lower() == 'max':
             v, center = pf.h.find_max("Density")
@@ -200,21 +180,40 @@
             center = (pf.domain_left_edge + pf.domain_right_edge) / 2
         else:
             raise RuntimeError('center keyword \"%s\" not recognized'%center)
+    return center
 
-    if width.shape == (2,):
+
+def GetWindowParameters(axis, center, width, pf):
+    width = StandardWidth(axis, width, None, pf)
+    center = StandardCenter(center, pf)
+    units = (width[0][1], width[1][1])
+    bounds = (center[x_dict[axis]]-width[0][0]/pf[units[0]]/2,  
+              center[x_dict[axis]]+width[0][0]/pf[units[0]]/2, 
+              center[y_dict[axis]]-width[1][0]/pf[units[1]]/2, 
+              center[y_dict[axis]]+width[1][0]/pf[units[1]]/2)
+    return (bounds, center, units)
+
+def GetObliqueWindowParameters(normal, center, width, pf, depth=None):
+    width = StandardWidth(normal, width, depth, pf)
+    center = StandardCenter(center, pf)
+
+    if len(width) == 2:
         # Transforming to the cutting plane coordinate system
         center = np.array(center)
         center = (center - pf.domain_left_edge)/pf.domain_width - 0.5
         (normal,perp1,perp2) = ortho_find(normal)
         mat = np.transpose(np.column_stack((perp1,perp2,normal)))
         center = np.dot(mat,center)
-        width = width
     
-        bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2]
+        units = [width[0][1], width[1][1]]
+        bounds = [-width[0][0]/pf[units[0]]/2, width[0][0]/pf[units[0]]/2, 
+                  -width[1][0]/pf[units[1]]/2, width[1][0]/pf[units[1]]/2]
     else:
-        bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2, -width[2]/2, width[2]/2]
-
-    return (bounds,center)
+        units = [width[0][1], width[1][1], width[2][1]]
+        bounds = [-width[0][0]/pf[units[0]]/2, width[0][0]/pf[units[0]]/2, 
+                  -width[1][0]/pf[units[1]]/2, width[1][0]/pf[units[1]]/2, 
+                  -width[2][0]/pf[units[2]]/2, width[2][0]/pf[units[2]]/2]
+    return (bounds, center, units)
 
 class PlotWindow(object):
     _plot_valid = False
@@ -264,7 +263,7 @@
         self.oblique = oblique
         self.data_source = data_source
         self.buff_size = buff_size
-        self.antialias = True
+        self.antialias = antialias
         self.set_window(bounds) # this automatically updates the data and plot
         self.origin = origin
         self.fontsize = fontsize
@@ -527,7 +526,7 @@
     def __init__(self, *args,**kwargs):
         setup = kwargs.pop("setup", True)
         PlotWindow.__init__(self, *args,**kwargs)
-        self._unit = None
+        self._axes_unit_names = None
         self._callbacks = []
         self._field_transform = {}
         self._colormaps = defaultdict(lambda: 'algae')
@@ -654,12 +653,14 @@
 
         Parameters
         ----------
-        unit_name : string
+        unit_name : string or two element tuple of strings
             A unit, available for conversion in the parameter file, that the
             image extents will be displayed in.  If set to None, any previous
             units will be reset.  If the unit is None, the default is chosen.
             If unit_name is '1', 'u', or 'unitary', it will not display the 
-            units, and only show the axes name.
+            units, and only show the axes name. If unit_name is a tuple, the first
+            element is assumed to be the unit for the x axis and the second element
+            the unit for the y axis.
 
         Raises
         ------
@@ -677,12 +678,13 @@
         >>> p.show()
         """
         # blind except because it could be in conversion_factors or units
-        try:
-            self.pf[unit_name]
-        except KeyError: 
-            if unit_name is not None:
-                raise YTUnitNotRecognized(unit_name)
-        self._unit = unit_name
+        if unit_name is not None:
+            for un in unit_name:
+                try:
+                    self.pf[un]
+                except KeyError: 
+                    raise YTUnitNotRecognized(un)
+        self._axes_unit_names = unit_name
 
     def get_metadata(self, field, strip_mathml = True, return_string = True):
         fval = self._frb[field]
@@ -690,10 +692,11 @@
         ma = fval.max()
         x_width = self.xlim[1] - self.xlim[0]
         y_width = self.ylim[1] - self.ylim[0]
-        if self._unit is None:
+        if self._axes_unit_names is None:
             unit = get_smallest_appropriate_unit(x_width, self.pf)
+            unit = (unit, unit)
         else:
-            unit = self._unit
+            unit = self._axes_unit_names
         units = self.get_field_units(field, strip_mathml)
         center = getattr(self._frb.data_source, "center", None)
         if center is None or self._frb.axis == 4:
@@ -704,19 +707,20 @@
             center[y_dict[self._frb.axis]] = 0.5 * (
                 self.ylim[0] + self.ylim[1])
             xc, yc, zc = center
+        pdb.set_trace()
         if return_string:
             md = _metadata_template % dict(
                 pf = self.pf,
-                x_width = x_width*self.pf[unit],
-                y_width = y_width*self.pf[unit],
-                unit = unit, units = units, mi = mi, ma = ma,
-                xc = xc, yc = yc, zc = zc)
+                x_width = x_width*self.pf[unit[0]],
+                y_width = y_width*self.pf[unit[1]],
+                axes_unit_names = unit[0], colorbar_unit = units, 
+                mi = mi, ma = ma, xc = xc, yc = yc, zc = zc)
         else:
             md = dict(pf = self.pf,
-                      x_width = x_width*self.pf[unit],
-                      y_width = y_width*self.pf[unit],
-                      unit = unit, units = units, mi = mi, ma = ma,
-                      xc = xc, yc = yc, zc = zc)
+                      x_width = x_width*self.pf[unit[0]],
+                      y_width = y_width*self.pf[unit[1]],
+                      axes_unit_names = unit, colorbar_unit = units, 
+                      mi = mi, ma = ma, xc = xc, yc = yc, zc = zc)
         return md
 
     def get_field_units(self, field, strip_mathml = True):
@@ -776,9 +780,17 @@
                 raise RuntimeError(
                     'origin keyword: \"%(k)s\" not recognized' % {'k': self.origin})
 
-            extent = [self.xlim[i] - xc for i in (0,1)]
-            extent.extend([self.ylim[i] - yc for i in (0,1)])
-            extent = [el*self.pf[md['unit']] for el in extent]
+            if (md['axes_unit_names'][0] != md['axes_unit_names'][1]):
+                raise RuntimeError(
+                    'Axis unit labels for the x and y axis must be the same. \n'
+                    'x axis label: \"%(x)s\" \n'
+                    'y axis label: \"%(y)s\" \n' 
+                    % {'x' : md['axes_unit_names'][0],
+                       'y' : md['axes_unit_names'][1]})
+
+            extentx = [(self.xlim[i] - xc)*self.pf[md['axes_unit_names'][0]] for i in (0,1)]
+            extenty = [(self.ylim[i] - yc)*self.pf[md['axes_unit_names'][1]] for i in (0,1)]
+            extent = extentx + extenty
 
             if f in self.plots.keys():
                 zlim = (self.plots[f].zmin,self.plots[f].zmax)
@@ -798,20 +810,21 @@
 
             self.plots[f] = WindowPlotMPL(self._frb[f], extent, self._field_transform[f], 
                                           self._colormaps[f], size, zlim)
+
             self.plots[f].cb = self.plots[f].figure.colorbar(
                 self.plots[f].image, cax = self.plots[f].cax)
 
-            if not md['unit'] in ['1', 'u', 'unitary']:
-                axes_unit_label = '\/\/('+md['unit']+')'
-            else:
-                axes_unit_label = ''
-
+            axes_unit_labels = ['', '']
+            for i, un in enumerate(md['axes_unit_names']):
+                if un not in ['1', 'u', 'unitary']:
+                    axes_unit_labels[i] = '\/\/('+un+')'
+                    
             if self.oblique == False:
                 labels = [r'$\rm{'+axis_labels[axis_index][i]+
-                        axes_unit_label + r'}$' for i in (0,1)]
+                        axes_unit_labels[i] + r'}$' for i in (0,1)]
             else:
-                labels = [r'$\rm{Image\/x'+axes_unit_label+'}$',
-                          r'$\rm{Image\/y'+axes_unit_label+'}$']
+                labels = [r'$\rm{Image\/x'+axes_unit_labels[0]+'}$',
+                          r'$\rm{Image\/y'+axes_unit_labels[1]+'}$']
 
             self.plots[f].axes.set_xlabel(labels[0],fontsize=self.fontsize)
             self.plots[f].axes.set_ylabel(labels[1],fontsize=self.fontsize)
@@ -831,14 +844,14 @@
             except ParseFatalException, err:
                 raise YTCannotParseFieldDisplayName(f,field_name,str(err))
 
-            if md['units'] == None or md['units'] == '':
+            if md['colorbar_unit'] == None or md['colorbar_unit'] == '':
                 label = field_name
             else:
                 try:
-                    parser.parse(r'$'+md['units']+r'$')
+                    parser.parse(r'$'+md['colorbar_unit']+r'$')
                 except ParseFatalException, err:
-                    raise YTCannotParseUnitDisplayName(f, md['units'],str(err))
-                label = field_name+r'$\/\/('+md['units']+r')$'
+                    raise YTCannotParseUnitDisplayName(f, md['colorbar_unit'],str(err))
+                label = field_name+r'$\/\/('+md['colorbar_unit']+r')$'
 
             self.plots[f].cb.set_label(label,fontsize=self.fontsize)
 
@@ -1038,12 +1051,14 @@
         >>> p.save('sliceplot')
         
         """
-        # tHis will handle time series data and controllers
+        # this will handle time series data and controllers
         ts = self._initialize_dataset(pf) 
         self.ts = ts
         pf = self.pf = ts[0]
         axis = fix_axis(axis)
-        (bounds, center) = GetBoundsAndCenter(axis, center, width, pf)
+        (bounds, center, units) = GetWindowParameters(axis, center, width, pf)
+        if axes_unit is None and units != ('1', '1'):
+            axes_unit = units
         slc = pf.h.slice(axis, center[axis], fields=fields)
         PWViewerMPL.__init__(self, slc, bounds, origin=origin)
         self.set_axes_unit(axes_unit)
@@ -1130,7 +1145,9 @@
         self.ts = ts
         pf = self.pf = ts[0]
         axis = fix_axis(axis)
-        (bounds, center) = GetBoundsAndCenter(axis, center, width, pf)
+        (bounds, center, units) = GetWindowParameters(axis, center, width, pf)
+        if axes_unit is None  and units != ('1', '1'):
+            axes_unit = units
         proj = pf.h.proj(axis,fields,weight_field=weight_field,max_level=max_level,center=center)
         PWViewerMPL.__init__(self,proj,bounds,origin=origin)
         self.set_axes_unit(axes_unit)
@@ -1180,8 +1197,10 @@
         fontsize : integer
              The size of the fonts for the axis, colorbar, and tick labels.
         """
-        (bounds,center_rot) = GetOffAxisBoundsAndCenter(normal,center,width,pf)
-        cutting = pf.h.cutting(normal,center,fields=fields,north_vector=north_vector)
+        (bounds, center_rot, units) = GetObliqueWindowParameters(normal,center,width,pf)
+        if axes_unit is None and units != ('1', '1'):
+            axes_unit = units
+        cutting = pf.h.cutting(normal, center, fields=fields, north_vector=north_vector)
         # Hard-coding the origin keyword since the other two options
         # aren't well-defined for off-axis data objects
         PWViewerMPL.__init__(self,cutting,bounds,origin='center-window',periodic=False,oblique=True)
@@ -1264,8 +1283,9 @@
             set, an arbitrary grid-aligned north-vector is chosen.
 
         """
-        (bounds,center_rot) = GetOffAxisBoundsAndCenter(normal,center,width,pf,depth=depth)
-        # Hard-coding the resolution for now
+        (bounds, center_rot, units) = GetObliqueWindowParameters(normal,center,width,pf,depth=depth)
+        if axes_unit is None and units != ('1', '1'):
+            axes_unit = units[:2]
         fields = ensure_list(fields)[:]
         width = np.array((bounds[1] - bounds[0], bounds[3] - bounds[2], bounds[5] - bounds[4]))
         OffAxisProj = OffAxisProjectionDummyDataSource(center_rot, pf, normal, width, fields, interpolated,
@@ -1279,9 +1299,9 @@
 _metadata_template = """
 %(pf)s<br><br>
-Field of View:  %(x_width)0.3f %(unit)s<br>
-Minimum Value:  %(mi)0.3e %(units)s<br>
-Maximum Value:  %(ma)0.3e %(units)s<br>
+Field of View:  %(x_width)0.3f %(axes_unit_names)s<br>
+Minimum Value:  %(mi)0.3e %(colorbar_unit)s<br>
+Maximum Value:  %(ma)0.3e %(colorbar_unit)s<br>
 Central Point:  (data coords)<br>
    %(xc)0.14f<br>
    %(yc)0.14f<br>
@@ -1549,7 +1569,7 @@
             norm = matplotlib.colors.LogNorm()
         elif (field_transform.name == 'linear'):
             norm = matplotlib.colors.Normalize()
-        self.image = self.axes.imshow(data, origin='lower', extent = extent,
-                                      norm = norm, vmin = self.zmin, 
-                                      vmax = self.zmax, cmap = cmap)
+        self.image = self.axes.imshow(data, origin='lower', extent=extent,
+                                      norm=norm, vmin=self.zmin, 
+                                      vmax=self.zmax, cmap=cmap)
         self.image.axes.ticklabel_format(scilimits=(-4,3))



https://bitbucket.org/yt_analysis/yt-3.0/changeset/2dad179d5aec/
changeset:   2dad179d5aec
branch:      yt
user:        ngoldbaum
date:        2012-11-14 23:59:10
summary:     Fixing a number of issues in plot window creation.  Updating set_width to use the new machinery.
Updating docstrings.
affected #:  1 file
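
The set_width update means interactive resizing now accepts the same width forms as the plot constructors. A sketch, assuming `p` is an existing plot window object (e.g. from SlicePlot):

{{{
#!python

p.set_width((15, 'kpc'))                  # single width, axes labeled in kpc
p.set_width(((10, 'kpc'), (15, 'kpc')))   # independent x and y widths
p.set_width(0.2)                          # bare float: code units

}}}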

diff -r 280b561398adf16b96f27246eb8dd462ab3a2344 -r 2dad179d5aecd55cc7856b910247a5855be4fa7e yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -147,12 +147,14 @@
 linear_transform = FieldTransform('linear', lambda x: x, LinearLocator())
 
 def StandardWidth(axis, width, depth, pf):
-    if width == None:
+    if width is None:
         # Default to code units
         if not iterable(axis):
             width = ((pf.domain_width[x_dict[axis]], '1'),
                      (pf.domain_width[y_dict[axis]], '1'))
         else:
+            # axis is actually the normal vector
+            # for an off-axis data object.
             width = ((pf.domain_width.min(), '1'),
                      (pf.domain_width.min(), '1'))
     elif iterable(width): 
@@ -162,7 +164,7 @@
             pass
     else:
         width = ((width, '1'), (width, '1'))
-    if depth != None:
+    if depth is not None:
         if iterable(depth) and isinstance(depth[1], str):
             depth = (depth,)
         elif iterable(depth):
@@ -182,7 +184,6 @@
             raise RuntimeError('center keyword \"%s\" not recognized'%center)
     return center
 
-
 def GetWindowParameters(axis, center, width, pf):
     width = StandardWidth(axis, width, None, pf)
     center = StandardCenter(center, pf)
@@ -205,14 +206,14 @@
         mat = np.transpose(np.column_stack((perp1,perp2,normal)))
         center = np.dot(mat,center)
     
-        units = [width[0][1], width[1][1]]
-        bounds = [-width[0][0]/pf[units[0]]/2, width[0][0]/pf[units[0]]/2, 
-                  -width[1][0]/pf[units[1]]/2, width[1][0]/pf[units[1]]/2]
+        units = (width[0][1], width[1][1])
+        bounds = (-width[0][0]/pf[units[0]]/2, width[0][0]/pf[units[0]]/2, 
+                  -width[1][0]/pf[units[1]]/2, width[1][0]/pf[units[1]]/2)
     else:
-        units = [width[0][1], width[1][1], width[2][1]]
-        bounds = [-width[0][0]/pf[units[0]]/2, width[0][0]/pf[units[0]]/2, 
+        units = (width[0][1], width[1][1], width[2][1])
+        bounds = (-width[0][0]/pf[units[0]]/2, width[0][0]/pf[units[0]]/2, 
                   -width[1][0]/pf[units[1]]/2, width[1][0]/pf[units[1]]/2, 
-                  -width[2][0]/pf[units[2]]/2, width[2][0]/pf[units[2]]/2]
+                  -width[2][0]/pf[units[2]]/2, width[2][0]/pf[units[2]]/2)
     return (bounds, center, units)
 
 class PlotWindow(object):
@@ -434,42 +435,43 @@
              wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window 
              that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along 
              the y axis.  In the other two examples, code units are assumed, for example
-             (0.2, 0.3) requests a plot that has and x width of 0.2 and a y width of 0.3 
-             in code units.  the width of the image.
+             (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3 
+             in code units.  If units are provided the resulting plot axis labels will  
+             use the supplied units.
         unit : str
-            the unit the width has been specified in.
-            defaults to code units.  If width is a tuple this 
-            argument is ignored
+             the unit the width has been specified in.
+             defaults to code units.  If width is a tuple this 
+             argument is ignored
 
         """
-        if iterable(width): 
-            if isinstance(width[1],str):
-                w, unit = width
-                width = (w, w)
-            elif isinstance(width[1], tuple):
-                wx,unitx = width[0]
-                wy,unity = width[1]
-                width = (wx/self.pf[unitx],wy/self.pf[unity])
+        if width is not None:
+            set_axes_unit = True
         else:
-            width = (width, width)
-        Wx, Wy = width
-        width = (Wx,Wy)
-        width = [w / self.pf[unit] for w in width]
+            set_axes_unit = False
+
+        width = StandardWidth(self._frb.axis, width, None, self.pf)
 
         centerx = (self.xlim[1] + self.xlim[0])/2.
         centery = (self.ylim[1] + self.ylim[0])/2. 
         
-        self.xlim = (centerx - width[0]/2.,
-                     centerx + width[0]/2.)
-        self.ylim = (centery - width[1]/2.,
-                     centery + width[1]/2.)
+        units = (width[0][1], width[1][1])
+
+        if set_axes_unit:
+            self._axes_unit_names = units
+        else:
+            self._axes_unit_names = None
+
+        self.xlim = (centerx - width[0][0]/self.pf[units[0]]/2.,
+                     centerx + width[0][0]/self.pf[units[0]]/2.)
+        self.ylim = (centery - width[1][0]/self.pf[units[1]]/2.,
+                     centery + width[1][0]/self.pf[units[1]]/2.)
         
         if hasattr(self,'zlim'):
             centerz = (self.zlim[1] + self.zlim[0])/2.
-            mw = max(width)
+            mw = max([width[0][0], width[1][0]])
             self.zlim = (centerz - mw/2.,
                          centerz + mw/2.)
-        
+
     @invalidate_data
     def set_center(self, new_center, unit = '1'):
         """Sets a new center for the plot window
@@ -707,7 +709,6 @@
             center[y_dict[self._frb.axis]] = 0.5 * (
                 self.ylim[0] + self.ylim[1])
             xc, yc, zc = center
-        pdb.set_trace()
         if return_string:
             md = _metadata_template % dict(
                 pf = self.pf,
@@ -749,9 +750,9 @@
     _plot_type = None
 
     def __init__(self, *args, **kwargs):
-        if self._frb_generator == None:
+        if self._frb_generator is None:
             self._frb_generator = kwargs.pop("frb_generator")
-        if self._plot_type == None:
+        if self._plot_type is None:
             self._plot_type = kwargs.pop("plot_type")
         PWViewer.__init__(self, *args, **kwargs)
 
@@ -844,7 +845,7 @@
             except ParseFatalException, err:
                 raise YTCannotParseFieldDisplayName(f,field_name,str(err))
 
-            if md['colorbar_unit'] == None or md['colorbar_unit'] == '':
+            if md['colorbar_unit'] is None or md['colorbar_unit'] == '':
                 label = field_name
             else:
                 try:
@@ -918,7 +919,7 @@
         """
         names = []
         if mpl_kwargs is None: mpl_kwargs = {}
-        if name == None:
+        if name is None:
             name = str(self.pf)
         suffix = os.path.splitext(name)[1]
         if suffix != '':
@@ -1025,13 +1026,14 @@
              wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window 
              that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along 
              the y axis.  In the other two examples, code units are assumed, for example
-             (0.2, 0.3) requests a plot that has and x width of 0.2 and a y width of 0.3 
-             in code units.  
+             (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3 
+             in code units.  If units are provided the resulting plot axis labels will
+             use the supplied units.
         axes_unit : A string
-            The name of the unit for the tick labels on the x and y axes.  
-            Defaults to None, which automatically picks an appropriate unit.
-            If axes_unit is '1', 'u', or 'unitary', it will not display the 
-            units, and only show the axes name.
+             The name of the unit for the tick labels on the x and y axes.  
+             Defaults to None, which automatically picks an appropriate unit.
+             If axes_unit is '1', 'u', or 'unitary', it will not display the 
+             units, and only show the axes name.
         origin : string
              The location of the origin of the plot coordinate system.
              Currently, can be set to three options: 'left-domain', corresponding
@@ -1111,23 +1113,24 @@
              wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window 
              that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along 
              the y axis.  In the other two examples, code units are assumed, for example
-             (0.2, 0.3) requests a plot that has and x width of 0.2 and a y width of 0.3 
-             in code units.
+             (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3 
+             in code units.  If units are provided the resulting plot axis labels will 
+             use the supplied units.
         axes_unit : A string
-            The name of the unit for the tick labels on the x and y axes.  
-            Defaults to None, which automatically picks an appropriate unit.
-            If axes_unit is '1', 'u', or 'unitary', it will not display the 
-            units, and only show the axes name.
+             The name of the unit for the tick labels on the x and y axes.  
+             Defaults to None, which automatically picks an appropriate unit.
+             If axes_unit is '1', 'u', or 'unitary', it will not display the 
+             units, and only show the axes name.
         origin : A string
-            The location of the origin of the plot coordinate system.
-            Currently, can be set to three options: 'left-domain', corresponding
-            to the bottom-left hand corner of the simulation domain, 'center-domain',
-            corresponding the center of the simulation domain, or 'center-window' for 
-            the center of the plot window.
+             The location of the origin of the plot coordinate system.
+             Currently, can be set to three options: 'left-domain', corresponding
+             to the bottom-left hand corner of the simulation domain, 'center-domain',
+             corresponding the center of the simulation domain, or 'center-window' for 
+             the center of the plot window.
         weight_field : string
-            The name of the weighting field.  Set to None for no weight.
+             The name of the weighting field.  Set to None for no weight.
         max_level: int
-            The maximum level to project to.
+             The maximum level to project to.
         fontsize : integer
              The size of the fonts for the axis, colorbar, and tick labels.
         
@@ -1156,7 +1159,7 @@
     _plot_type = 'OffAxisSlice'
     _frb_generator = ObliqueFixedResolutionBuffer
 
-    def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
+    def __init__(self, pf, normal, fields, center='c', width=None, 
                  axes_unit=None, north_vector=None, fontsize=15):
         r"""Creates an off axis slice plot from a parameter file
 
@@ -1233,8 +1236,8 @@
     _plot_type = 'OffAxisProjection'
     _frb_generator = OffAxisProjectionFixedResolutionBuffer
 
-    def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
-                 depth=(1,'unitary'), axes_unit=None, weight_field=None, 
+    def __init__(self, pf, normal, fields, center='c', width=None, 
+                 depth=(1, '1'), axes_unit=None, weight_field=None, 
                  max_level=None, north_vector=None, volume=None, no_ghost=False, 
                  le=None, re=None, interpolated=False, fontsize=15):
         r"""Creates an off axis projection plot from a parameter file
@@ -1284,7 +1287,7 @@
 
         """
         (bounds, center_rot, units) = GetObliqueWindowParameters(normal,center,width,pf,depth=depth)
-        if axes_unit is None and units != ('1', '1'):
+        if axes_unit is None and units != ('1', '1', '1'):
             axes_unit = units[:2]
         fields = ensure_list(fields)[:]
         width = np.array((bounds[1] - bounds[0], bounds[3] - bounds[2], bounds[5] - bounds[4]))
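
A minimal standalone sketch of the width convention this changeset settles
on: StandardWidth normalizes every width argument into a tuple of
(value, unit) pairs, one per image axis, with the unit '1' meaning code
units.  This is a simplified illustration, not the merged code; here
`domain_width` stands in for pf.domain_width:

    def normalize_width(width, domain_width=(1.0, 1.0)):
        # None -> the full domain extent, in code units.
        if width is None:
            return ((domain_width[0], '1'), (domain_width[1], '1'))
        if isinstance(width, tuple) and isinstance(width[1], str):
            # (10, 'kpc') -> the same (value, unit) pair on both axes.
            return (width, width)
        if isinstance(width, tuple) and isinstance(width[1], tuple):
            # ((10, 'kpc'), (15, 'kpc')) is already in normalized form.
            return width
        # A bare float is interpreted as code units on both axes.
        return ((width, '1'), (width, '1'))

    # normalize_width(0.2)         -> ((0.2, '1'), (0.2, '1'))
    # normalize_width((10, 'kpc')) -> ((10, 'kpc'), (10, 'kpc'))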



https://bitbucket.org/yt_analysis/yt-3.0/changeset/9d5b945ee428/
changeset:   9d5b945ee428
branch:      yt
user:        ngoldbaum
date:        2012-11-15 06:16:26
summary:     Plots can now be created with different x and y units.
affected #:  1 file

diff -r 2dad179d5aecd55cc7856b910247a5855be4fa7e -r 9d5b945ee428fde46bd25f5b01185850f20e11e2 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -781,25 +781,20 @@
                 raise RuntimeError(
                     'origin keyword: \"%(k)s\" not recognized' % {'k': self.origin})
 
-            if (md['axes_unit_names'][0] != md['axes_unit_names'][1]):
-                raise RuntimeError(
-                    'Axis unit labels for the x and y axis must be the same. \n'
-                    'x axis label: \"%(x)s\" \n'
-                    'y axis label: \"%(y)s\" \n' 
-                    % {'x' : md['axes_unit_names'][0],
-                       'y' : md['axes_unit_names'][1]})
+            (unit_x, unit_y) = md['axes_unit_names']
 
-            extentx = [(self.xlim[i] - xc)*self.pf[md['axes_unit_names'][0]] for i in (0,1)]
-            extenty = [(self.ylim[i] - yc)*self.pf[md['axes_unit_names'][1]] for i in (0,1)]
+            extentx = [(self.xlim[i] - xc) * self.pf[unit_x] for i in (0,1)]
+            extenty = [(self.ylim[i] - yc) * self.pf[unit_y] for i in (0,1)]
+
             extent = extentx + extenty
 
             if f in self.plots.keys():
-                zlim = (self.plots[f].zmin,self.plots[f].zmax)
+                zlim = (self.plots[f].zmin, self.plots[f].zmax)
             else:
-                zlim = (None,None)
+                zlim = (None, None)
 
-            aspect = (self.xlim[1] - self.xlim[0])/(self.ylim[1]-self.ylim[0])
-
+            aspect = (self.xlim[1] - self.xlim[0]) / (self.ylim[1] - self.ylim[0])
+            
             # This sets the size of the figure, and defaults to making one of the dimensions smaller.
             # This should protect against giant images in the case of a very large aspect ratio.
             norm_size = 10.0
@@ -809,23 +804,26 @@
             else:
                 size = (aspect*norm_size*(1.+cbar_frac), norm_size)
 
-            self.plots[f] = WindowPlotMPL(self._frb[f], extent, self._field_transform[f], 
+            # Correct the aspect ratio in case unit_x and unit_y are different
+            aspect *= self.pf[unit_x]/self.pf[unit_y]
+
+            self.plots[f] = WindowPlotMPL(self._frb[f], extent, aspect, self._field_transform[f], 
                                           self._colormaps[f], size, zlim)
 
             self.plots[f].cb = self.plots[f].figure.colorbar(
                 self.plots[f].image, cax = self.plots[f].cax)
 
             axes_unit_labels = ['', '']
-            for i, un in enumerate(md['axes_unit_names']):
+            for i, un in enumerate((unit_x, unit_y)):
                 if un not in ['1', 'u', 'unitary']:
                     axes_unit_labels[i] = '\/\/('+un+')'
                     
-            if self.oblique == False:
-                labels = [r'$\rm{'+axis_labels[axis_index][i]+
-                        axes_unit_labels[i] + r'}$' for i in (0,1)]
-            else:
+            if self.oblique:
                 labels = [r'$\rm{Image\/x'+axes_unit_labels[0]+'}$',
                           r'$\rm{Image\/y'+axes_unit_labels[1]+'}$']
+            else:
+                labels = [r'$\rm{'+axis_labels[axis_index][i]+
+                          axes_unit_labels[i] + r'}$' for i in (0,1)]
 
             self.plots[f].axes.set_xlabel(labels[0],fontsize=self.fontsize)
             self.plots[f].axes.set_ylabel(labels[1],fontsize=self.fontsize)
@@ -1562,17 +1560,17 @@
         return f.read()
 
 class WindowPlotMPL(PlotMPL):
-    def __init__(self, data, extent, field_transform, cmap, size, zlim):
+    def __init__(self, data, extent, aspect, field_transform, cmap, size, zlim):
         self.zmin, self.zmax = zlim
         PlotMPL.__init__(self, data, size)
-        self.__init_image(data, extent, field_transform, cmap)
+        self.__init_image(data, extent, aspect, field_transform, cmap)
 
-    def __init_image(self, data, extent, field_transform, cmap):
+    def __init_image(self, data, extent, aspect, field_transform, cmap):
         if (field_transform.name == 'log10'):
             norm = matplotlib.colors.LogNorm()
         elif (field_transform.name == 'linear'):
             norm = matplotlib.colors.Normalize()
         self.image = self.axes.imshow(data, origin='lower', extent=extent,
-                                      norm=norm, vmin=self.zmin, 
+                                      norm=norm, vmin=self.zmin, aspect=aspect, 
                                       vmax=self.zmax, cmap=cmap)
-        self.image.axes.ticklabel_format(scilimits=(-4,3))
+        self.image.axes.ticklabel_format(scilimits=(-2,3))
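
A usage sketch of the new capability (dataset and field names are
placeholders; passing a tuple to set_axes_unit is the interface this series
introduces):

    # p = SlicePlot(pf, 'z', 'Density')
    # p.set_axes_unit(('kpc', 'pc'))   # x tick labels in kpc, y in pc

The aspect value threaded through WindowPlotMPL folds in the unit ratio
pf[unit_x]/pf[unit_y] so that equal physical lengths still render equally
when the two axes carry different units (refined for non-square windows in
a later changeset below).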



https://bitbucket.org/yt_analysis/yt-3.0/changeset/8cb9af8d00ee/
changeset:   8cb9af8d00ee
branch:      yt
user:        samskillman
date:        2012-11-15 19:22:11
summary:     Need to use the real path for local storage output dir.
affected #:  1 file

diff -r 9d5b945ee428fde46bd25f5b01185850f20e11e2 -r 8cb9af8d00eea1225d16825db5e931cfde23baa4 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -100,7 +100,7 @@
             # Fix up filename for local storage 
             if options.compare_name is not None:
                 options.compare_name = "%s/%s/%s" % \
-                    (options.output_dir, options.compare_name, 
+                    (os.path.realpath(options.output_dir), options.compare_name, 
                      options.compare_name)
             if options.this_name is not None:
                 name_dir_path = "%s/%s" % \



https://bitbucket.org/yt_analysis/yt-3.0/changeset/f6ac7d402559/
changeset:   f6ac7d402559
branch:      yt
user:        ngoldbaum
date:        2012-11-12 19:56:17
summary:     Fixing the way we find image filename suffixes.  Closes #439.
affected #:  2 files

diff -r 26ca29ffd1d14a695209102bfc422872480e4804 -r f6ac7d402559656798890e668ff3c9c559bbc313 yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -420,7 +420,7 @@
     else:
         dpi = None
 
-    suffix = os.path.splitext(filename)[1]
+    suffix = os.path.splitext(filename)[-1]
 
     if suffix == '':
         suffix = '.png'


diff -r 26ca29ffd1d14a695209102bfc422872480e4804 -r f6ac7d402559656798890e668ff3c9c559bbc313 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -892,7 +892,7 @@
         if mpl_kwargs is None: mpl_kwargs = {}
         if name == None:
             name = str(self.pf)
-        suffix = os.path.splitext(name)[1]
+        suffix = os.path.splitext(name)[-1]
         if suffix != '':
             for k, v in self.plots.iteritems():
                 names.append(v.save(name,mpl_kwargs))
@@ -1453,7 +1453,7 @@
             self.cax = self.figure.add_axes(caxrect)
             
     def save(self, name, mpl_kwargs, canvas = None):
-        suffix = os.path.splitext(name)[1]
+        suffix = os.path.splitext(name)[-1]
         
         if suffix == '':
             suffix = '.png'
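
Side note: os.path.splitext always returns a (root, ext) 2-tuple, so the
[1] and [-1] indexes are equivalent here; the behavioral change for
non-image suffixes lands with the get_image_suffix() helper in the next
changesets.  For example:

    >>> import os.path
    >>> os.path.splitext("plot.png")
    ('plot', '.png')
    >>> os.path.splitext("RedshiftOutput0005")
    ('RedshiftOutput0005', '')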



https://bitbucket.org/yt_analysis/yt-3.0/changeset/0711a5d01017/
changeset:   0711a5d01017
branch:      yt
user:        ngoldbaum
date:        2012-11-15 02:12:55
summary:     Merging.
affected #:  2 files

diff -r 2dad179d5aecd55cc7856b910247a5855be4fa7e -r 0711a5d010170da6228f8fb6fe6cc46ce38f4ade yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -420,7 +420,7 @@
     else:
         dpi = None
 
-    suffix = os.path.splitext(filename)[1]
+    suffix = os.path.splitext(filename)[-1]
 
     if suffix == '':
         suffix = '.png'


diff -r 2dad179d5aecd55cc7856b910247a5855be4fa7e -r 0711a5d010170da6228f8fb6fe6cc46ce38f4ade yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -921,7 +921,7 @@
         if mpl_kwargs is None: mpl_kwargs = {}
         if name is None:
             name = str(self.pf)
-        suffix = os.path.splitext(name)[1]
+        suffix = os.path.splitext(name)[-1]
         if suffix != '':
             for k, v in self.plots.iteritems():
                 names.append(v.save(name,mpl_kwargs))
@@ -1496,7 +1496,7 @@
             self.cax = self.figure.add_axes(caxrect)
             
     def save(self, name, mpl_kwargs, canvas = None):
-        suffix = os.path.splitext(name)[1]
+        suffix = os.path.splitext(name)[-1]
         
         if suffix == '':
             suffix = '.png'



https://bitbucket.org/yt_analysis/yt-3.0/changeset/9a13943aae04/
changeset:   9a13943aae04
branch:      yt
user:        ngoldbaum
date:        2012-11-15 02:19:48
summary:     Adding a new get_image_suffix() helper to funcs.py which determines whether a filename has an image suffix.
affected #:  3 files

diff -r 0711a5d010170da6228f8fb6fe6cc46ce38f4ade -r 9a13943aae0445b03d898edeb16397c5a9f27827 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -567,3 +567,7 @@
         
 def fix_axis(axis):
     return inv_axis_names.get(axis, axis)
+
+def get_image_suffix(name):
+    suffix = os.path.splitext(name)[1]
+    return suffix if suffix in ['png', 'eps', 'ps', 'pdf'] else ''


diff -r 0711a5d010170da6228f8fb6fe6cc46ce38f4ade -r 9a13943aae0445b03d898edeb16397c5a9f27827 yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -420,7 +420,7 @@
     else:
         dpi = None
 
-    suffix = os.path.splitext(filename)[-1]
+    suffix = get_image_suffix(filename)
 
     if suffix == '':
         suffix = '.png'


diff -r 0711a5d010170da6228f8fb6fe6cc46ce38f4ade -r 9a13943aae0445b03d898edeb16397c5a9f27827 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -921,7 +921,7 @@
         if mpl_kwargs is None: mpl_kwargs = {}
         if name is None:
             name = str(self.pf)
-        suffix = os.path.splitext(name)[-1]
+        suffix = get_image_suffix(name)
         if suffix != '':
             for k, v in self.plots.iteritems():
                 names.append(v.save(name,mpl_kwargs))
@@ -1496,7 +1496,7 @@
             self.cax = self.figure.add_axes(caxrect)
             
     def save(self, name, mpl_kwargs, canvas = None):
-        suffix = os.path.splitext(name)[-1]
+        suffix = get_image_suffix(name)
         
         if suffix == '':
             suffix = '.png'
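
One caveat with the helper as written: os.path.splitext keeps the leading
dot in the extension (os.path.splitext("plot.png")[1] == ".png"), so the
membership test against ['png', 'eps', 'ps', 'pdf'] never matches and
get_image_suffix() always returns ''.  A sketch of the presumably intended
comparison:

    import os

    def get_image_suffix(name):
        suffix = os.path.splitext(name)[1]
        # splitext() returns the extension with its leading dot.
        return suffix if suffix in ['.png', '.eps', '.ps', '.pdf'] else ''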



https://bitbucket.org/yt_analysis/yt-3.0/changeset/e0fea3dd9635/
changeset:   e0fea3dd9635
branch:      yt
user:        MatthewTurk
date:        2012-11-15 19:52:10
summary:     Merged in ngoldbaum/yt (pull request #334)
affected #:  3 files

diff -r 8cb9af8d00eea1225d16825db5e931cfde23baa4 -r e0fea3dd96350177c6bb49727f252f2303e0fdad yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -567,3 +567,7 @@
         
 def fix_axis(axis):
     return inv_axis_names.get(axis, axis)
+
+def get_image_suffix(name):
+    suffix = os.path.splitext(name)[1]
+    return suffix if suffix in ['png', 'eps', 'ps', 'pdf'] else ''


diff -r 8cb9af8d00eea1225d16825db5e931cfde23baa4 -r e0fea3dd96350177c6bb49727f252f2303e0fdad yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -420,7 +420,7 @@
     else:
         dpi = None
 
-    suffix = os.path.splitext(filename)[1]
+    suffix = get_image_suffix(filename)
 
     if suffix == '':
         suffix = '.png'


diff -r 8cb9af8d00eea1225d16825db5e931cfde23baa4 -r e0fea3dd96350177c6bb49727f252f2303e0fdad yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -919,7 +919,7 @@
         if mpl_kwargs is None: mpl_kwargs = {}
         if name is None:
             name = str(self.pf)
-        suffix = os.path.splitext(name)[1]
+        suffix = get_image_suffix(name)
         if suffix != '':
             for k, v in self.plots.iteritems():
                 names.append(v.save(name,mpl_kwargs))
@@ -1494,7 +1494,7 @@
             self.cax = self.figure.add_axes(caxrect)
             
     def save(self, name, mpl_kwargs, canvas = None):
-        suffix = os.path.splitext(name)[1]
+        suffix = get_image_suffix(name)
         
         if suffix == '':
             suffix = '.png'



https://bitbucket.org/yt_analysis/yt-3.0/changeset/c57187ca13b6/
changeset:   c57187ca13b6
branch:      yt
user:        ngoldbaum
date:        2012-11-15 21:51:36
summary:     Fixing aspect ratios for non-square plot windows
affected #:  1 file

diff -r e0fea3dd96350177c6bb49727f252f2303e0fdad -r c57187ca13b69862a02b80af7b5b3eb83698a4c6 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -793,20 +793,20 @@
             else:
                 zlim = (None, None)
 
-            aspect = (self.xlim[1] - self.xlim[0]) / (self.ylim[1] - self.ylim[0])
+            plot_aspect = (self.xlim[1] - self.xlim[0]) / (self.ylim[1] - self.ylim[0])
             
             # This sets the size of the figure, and defaults to making one of the dimensions smaller.
             # This should protect against giant images in the case of a very large aspect ratio.
             norm_size = 10.0
             cbar_frac = 0.0
-            if aspect > 1.0:
-                size = (norm_size*(1.+cbar_frac), norm_size/aspect)
+            if plot_aspect > 1.0:
+                size = (norm_size*(1.+cbar_frac), norm_size/plot_aspect)
             else:
-                size = (aspect*norm_size*(1.+cbar_frac), norm_size)
+                size = (plot_aspect*norm_size*(1.+cbar_frac), norm_size)
 
             # Correct the aspect ratio in case unit_x and unit_y are different
-            aspect *= self.pf[unit_x]/self.pf[unit_y]
-
+            aspect = self.pf[unit_x]/self.pf[unit_y]
+            
             self.plots[f] = WindowPlotMPL(self._frb[f], extent, aspect, self._field_transform[f], 
                                           self._colormaps[f], size, zlim)
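
After this change the two ratios do different jobs: plot_aspect (the
data-space x/y extent ratio) only sizes the figure, while the aspect passed
to imshow is just pf[unit_x]/pf[unit_y], which keeps equal physical lengths
rendered equally when the axes use different units.  A toy numerical check
(conversion factors are made up, not yt's):

    pf = {'kpc': 1.0, 'pc': 1000.0}          # stand-in for pf[unit] lookups
    unit_x, unit_y = 'kpc', 'pc'
    xlim = ylim = (0.0, 1.0)                 # equal extents in code units
    plot_aspect = (xlim[1] - xlim[0]) / (ylim[1] - ylim[0])   # 1.0
    aspect = pf[unit_x] / pf[unit_y]         # 0.001
    # imshow(..., aspect=aspect) then draws one pc on y the same length as
    # 0.001 kpc on x, i.e. equal physical distances stay equal on screen.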
 



https://bitbucket.org/yt_analysis/yt-3.0/changeset/3d6b030a34ad/
changeset:   3d6b030a34ad
branch:      yt
user:        chummels
date:        2012-11-16 01:58:23
summary:     Modifying the answer testing framework slightly to fix a bug that occurs when running some of the tests and causes the pf to not be loaded properly.
affected #:  1 file

diff -r c57187ca13b69862a02b80af7b5b3eb83698a4c6 -r 3d6b030a34ad4bc6caf9049ccf29e40cba1a9e4f yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -216,9 +216,9 @@
 
 def data_dir_load(pf_fn):
     path = ytcfg.get("yt", "test_data_dir")
+    if isinstance(pf_fn, StaticOutput): return pf_fn
     if not os.path.isdir(path):
         return False
-    if isinstance(pf_fn, StaticOutput): return pf_fn
     with temp_cwd(path):
         pf = load(pf_fn)
         pf.h
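
(The reordering matters because an already-instantiated StaticOutput should
be passed straight through; previously a missing or unset test_data_dir
would return False before that check was ever reached.)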



https://bitbucket.org/yt_analysis/yt-3.0/changeset/9894e94c2be6/
changeset:   9894e94c2be6
branch:      yt
user:        jzuhone
date:        2012-11-16 04:05:03
summary:     This should be a single if/elif construct
affected #:  1 file

diff -r 547be7c0954206d1e7f969eba4547517d7156180 -r 9894e94c2be68cb41221a7e67acaae63e5bf07eb yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -266,9 +266,9 @@
 def GetMagRescalingFactor(pf):
     if pf['unitsystem'].lower() == "cgs":
          factor = 1
-    if pf['unitsystem'].lower() == "si":
+    elif pf['unitsystem'].lower() == "si":
          factor = np.sqrt(4*np.pi/1e7)
-    if pf['unitsystem'].lower() == "none":
+    elif pf['unitsystem'].lower() == "none":
          factor = np.sqrt(4*np.pi)
     else:
         raise RuntimeError("Runtime parameter unitsystem with"



https://bitbucket.org/yt_analysis/yt-3.0/changeset/ecb651f99c6e/
changeset:   ecb651f99c6e
branch:      yt
user:        jzuhone
date:        2012-11-16 04:10:32
summary:     Merging
affected #:  4 files

diff -r 9894e94c2be68cb41221a7e67acaae63e5bf07eb -r ecb651f99c6e02c03676ed2bcc79fa25fab76f2a yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -567,3 +567,7 @@
         
 def fix_axis(axis):
     return inv_axis_names.get(axis, axis)
+
+def get_image_suffix(name):
+    suffix = os.path.splitext(name)[1]
+    return suffix if suffix in ['png', 'eps', 'ps', 'pdf'] else ''


diff -r 9894e94c2be68cb41221a7e67acaae63e5bf07eb -r ecb651f99c6e02c03676ed2bcc79fa25fab76f2a yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -49,6 +49,7 @@
 
 class AnswerTesting(Plugin):
     name = "answer-testing"
+    _my_version = None
 
     def options(self, parser, env=os.environ):
         super(AnswerTesting, self).options(parser, env=env)
@@ -65,16 +66,25 @@
         parser.add_option("--local-store", dest="store_local_results",
             default=False, action="store_true", help="Store/Load local results?")
 
+    @property
+    def my_version(self, version=None):
+        if self._my_version is not None:
+            return self._my_version
+        if version is None:
+            try:
+                version = get_yt_version()
+            except:
+                version = "UNKNOWN%s" % (time.time())
+        self._my_version = version
+        return self._my_version
+
     def configure(self, options, conf):
         super(AnswerTesting, self).configure(options, conf)
         if not self.enabled:
             return
         disable_stream_logging()
-        try:
-            my_hash = get_yt_version()
-        except:
-            my_hash = "UNKNOWN%s" % (time.time())
-        if options.this_name is None: options.this_name = my_hash
+        if options.this_name is None: 
+            options.this_name = self.my_version
         from yt.config import ytcfg
         ytcfg["yt","__withintesting"] = "True"
         AnswerTestingTest.result_storage = \
@@ -84,19 +94,28 @@
         elif options.compare_name == "latest":
             options.compare_name = _latest
 
-        # We only either store or test.
+        # Local/Cloud storage 
         if options.store_local_results:
+            storage_class = AnswerTestLocalStorage
+            # Fix up filename for local storage 
             if options.compare_name is not None:
-                options.compare_name = "%s/%s" % \
-                        (os.path.realpath(options.output_dir), 
-                         options.compare_name)
-            AnswerTestingTest.reference_storage = \
-                self.storage = \
-                    AnswerTestLocalStorage(options.compare_name, 
-                                           not options.store_results)
+                options.compare_name = "%s/%s/%s" % \
+                    (os.path.realpath(options.output_dir), options.compare_name, 
+                     options.compare_name)
+            if options.this_name is not None:
+                name_dir_path = "%s/%s" % \
+                    (os.path.realpath(options.output_dir), 
+                    options.this_name)
+                if not os.path.isdir(name_dir_path):
+                    os.mkdir(name_dir_path)
+                options.this_name= "%s/%s" % \
+                        (name_dir_path, options.this_name)
         else:
-            AnswerTestingTest.reference_storage = \
-                self.storage = AnswerTestCloudStorage(options.compare_name, not options.store_results)
+            storage_class = AnswerTestCloudStorage
+
+        # Initialize answer/reference storage
+        AnswerTestingTest.reference_storage = self.storage = \
+                storage_class(options.compare_name, options.this_name)
 
         self.store_results = options.store_results
         self.store_local_results = options.store_local_results
@@ -108,10 +127,10 @@
         self.storage.dump(self.result_storage)        
 
 class AnswerTestStorage(object):
-    def __init__(self, reference_name, read=True):
+    def __init__(self, reference_name=None, answer_name=None):
         self.reference_name = reference_name
+        self.answer_name = answer_name
         self.cache = {}
-        self.read = read
     def dump(self, result_storage, result):
         raise NotImplementedError 
     def get(self, pf_name, default=None):
@@ -119,7 +138,7 @@
 
 class AnswerTestCloudStorage(AnswerTestStorage):
     def get(self, pf_name, default = None):
-        if not self.read: return default
+        if self.reference_name is None: return default
         if pf_name in self.cache: return self.cache[pf_name]
         url = _url_path % (self.reference_name, pf_name)
         try:
@@ -135,7 +154,7 @@
         return rv
 
     def dump(self, result_storage):
-        if self.read: return
+        if self.answer_name is None: return
         # This is where we dump our result storage up to Amazon, if we are able
         # to.
         import boto
@@ -144,18 +163,18 @@
         bucket = c.get_bucket("yt-answer-tests")
         for pf_name in result_storage:
             rs = cPickle.dumps(result_storage[pf_name])
-            tk = bucket.get_key("%s_%s" % (self.reference_name, pf_name)) 
+            tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name)) 
             if tk is not None: tk.delete()
             k = Key(bucket)
-            k.key = "%s_%s" % (self.reference_name, pf_name)
+            k.key = "%s_%s" % (self.answer_name, pf_name)
             k.set_contents_from_string(rs)
             k.set_acl("public-read")
 
 class AnswerTestLocalStorage(AnswerTestStorage):
     def dump(self, result_storage):
-        if self.read: return 
+        if self.answer_name is None: return
         # Store data using shelve
-        ds = shelve.open(self.reference_name, protocol=-1)
+        ds = shelve.open(self.answer_name, protocol=-1)
         for pf_name in result_storage:
             answer_name = "%s" % pf_name
             if name in ds:
@@ -164,7 +183,7 @@
         ds.close()
 
     def get(self, pf_name, default=None):
-        if not self.read: return default
+        if self.reference_name is None: return default
         # Read data using shelve
         answer_name = "%s" % pf_name
         ds = shelve.open(self.reference_name, protocol=-1)
@@ -197,9 +216,9 @@
 
 def data_dir_load(pf_fn):
     path = ytcfg.get("yt", "test_data_dir")
+    if isinstance(pf_fn, StaticOutput): return pf_fn
     if not os.path.isdir(path):
         return False
-    if isinstance(pf_fn, StaticOutput): return pf_fn
     with temp_cwd(path):
         pf = load(pf_fn)
         pf.h
@@ -224,8 +243,7 @@
 
     def __call__(self):
         nv = self.run()
-        if self.reference_storage.read and \
-           self.reference_storage.reference_name is not None:
+        if self.reference_storage.reference_name is not None:
             dd = self.reference_storage.get(self.storage_name)
             if dd is None: raise YTNoOldAnswer(self.storage_name)
             ov = dd[self.description]


diff -r 9894e94c2be68cb41221a7e67acaae63e5bf07eb -r ecb651f99c6e02c03676ed2bcc79fa25fab76f2a yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -420,7 +420,7 @@
     else:
         dpi = None
 
-    suffix = os.path.splitext(filename)[1]
+    suffix = get_image_suffix(filename)
 
     if suffix == '':
         suffix = '.png'


diff -r 9894e94c2be68cb41221a7e67acaae63e5bf07eb -r ecb651f99c6e02c03676ed2bcc79fa25fab76f2a yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -146,53 +146,35 @@
 log_transform = FieldTransform('log10', np.log10, LogLocator())
 linear_transform = FieldTransform('linear', lambda x: x, LinearLocator())
 
-def GetBoundsAndCenter(axis, center, width, pf, unit='1'):
-    if width == None:
-        width = (pf.domain_width[x_dict[axis]],
-                 pf.domain_width[y_dict[axis]])
+def StandardWidth(axis, width, depth, pf):
+    if width is None:
+        # Default to code units
+        if not iterable(axis):
+            width = ((pf.domain_width[x_dict[axis]], '1'),
+                     (pf.domain_width[y_dict[axis]], '1'))
+        else:
+            # axis is actually the normal vector
+            # for an off-axis data object.
+            width = ((pf.domain_width.min(), '1'),
+                     (pf.domain_width.min(), '1'))
     elif iterable(width): 
-        if isinstance(width[1],str):
-            w,unit = width
-            width = (w, w)
-        elif isinstance(width[1],tuple):
-            wx,unitx = width[0]
-            wy,unity = width[1]
-            width = (wx/pf[unitx],wy/pf[unity])
+        if isinstance(width[1], str):
+            width = (width, width)
+        elif isinstance(width[1], tuple):
+            pass
     else:
-        width = (width, width)
-    Wx, Wy = width
-    width = (Wx/pf[unit], Wy/pf[unit])
-    if isinstance(center,str):
-        if center.lower() == 'm' or center.lower() == 'max':
-            v, center = pf.h.find_max("Density")
-        elif center.lower() == "center" or center.lower() == "c":
-            center = (pf.domain_right_edge + pf.domain_left_edge)/2.0
-        else:
-            raise RuntimeError('center keyword \"%s\" not recognized'%center)
-    bounds = [center[x_dict[axis]]-width[0]/2,
-              center[x_dict[axis]]+width[0]/2,
-              center[y_dict[axis]]-width[1]/2,
-              center[y_dict[axis]]+width[1]/2]
-    return (bounds,center)
-
-def GetOffAxisBoundsAndCenter(normal, center, width, pf, unit='1',depth=None):
-    if width == None:
-        width = (pf.domain_width.min(),
-                 pf.domain_width.min())
-    elif iterable(width) and isinstance(width[1],str):
-        w,unit = width
-        width = w
-    if not iterable(width):
-        width = (width, width)
-    Wx, Wy = width
-    width = np.array((Wx/pf[unit], Wy/pf[unit]))
-    if depth != None:
-        if iterable(depth) and isinstance(depth[1],str):
-            d,unit = depth
-            depth = d/pf[unit]
+        width = ((width, '1'), (width, '1'))
+    if depth is not None:
+        if iterable(depth) and isinstance(depth[1], str):
+            depth = (depth,)
         elif iterable(depth):
             raise RuntimeError("Depth must be a float or a (width,\"unit\") tuple")
-        width = np.append(width,depth)
+        else:
+            depth = ((depth, '1'),)
+        width += depth
+    return width
+
+def StandardCenter(center, pf):
     if isinstance(center,str):
         if center.lower() == 'm' or center.lower() == 'max':
             v, center = pf.h.find_max("Density")
@@ -200,21 +182,39 @@
             center = (pf.domain_left_edge + pf.domain_right_edge) / 2
         else:
             raise RuntimeError('center keyword \"%s\" not recognized'%center)
+    return center
 
-    if width.shape == (2,):
+def GetWindowParameters(axis, center, width, pf):
+    width = StandardWidth(axis, width, None, pf)
+    center = StandardCenter(center, pf)
+    units = (width[0][1], width[1][1])
+    bounds = (center[x_dict[axis]]-width[0][0]/pf[units[0]]/2,  
+              center[x_dict[axis]]+width[0][0]/pf[units[0]]/2, 
+              center[y_dict[axis]]-width[1][0]/pf[units[1]]/2, 
+              center[y_dict[axis]]+width[1][0]/pf[units[1]]/2)
+    return (bounds, center, units)
+
+def GetObliqueWindowParameters(normal, center, width, pf, depth=None):
+    width = StandardWidth(normal, width, depth, pf)
+    center = StandardCenter(center, pf)
+
+    if len(width) == 2:
         # Transforming to the cutting plane coordinate system
         center = np.array(center)
         center = (center - pf.domain_left_edge)/pf.domain_width - 0.5
         (normal,perp1,perp2) = ortho_find(normal)
         mat = np.transpose(np.column_stack((perp1,perp2,normal)))
         center = np.dot(mat,center)
-        width = width
     
-        bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2]
+        units = (width[0][1], width[1][1])
+        bounds = (-width[0][0]/pf[units[0]]/2, width[0][0]/pf[units[0]]/2, 
+                  -width[1][0]/pf[units[1]]/2, width[1][0]/pf[units[1]]/2)
     else:
-        bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2, -width[2]/2, width[2]/2]
-
-    return (bounds,center)
+        units = (width[0][1], width[1][1], width[2][1])
+        bounds = (-width[0][0]/pf[units[0]]/2, width[0][0]/pf[units[0]]/2, 
+                  -width[1][0]/pf[units[1]]/2, width[1][0]/pf[units[1]]/2, 
+                  -width[2][0]/pf[units[2]]/2, width[2][0]/pf[units[2]]/2)
+    return (bounds, center, units)
 
 class PlotWindow(object):
     _plot_valid = False
@@ -264,7 +264,7 @@
         self.oblique = oblique
         self.data_source = data_source
         self.buff_size = buff_size
-        self.antialias = True
+        self.antialias = antialias
         self.set_window(bounds) # this automatically updates the data and plot
         self.origin = origin
         self.fontsize = fontsize
@@ -435,42 +435,43 @@
              wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window 
              that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along 
              the y axis.  In the other two examples, code units are assumed, for example
-             (0.2, 0.3) requests a plot that has and x width of 0.2 and a y width of 0.3 
-             in code units.  the width of the image.
+             (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3 
+             in code units.  If units are provided the resulting plot axis labels will  
+             use the supplied units.
         unit : str
-            the unit the width has been specified in.
-            defaults to code units.  If width is a tuple this 
-            argument is ignored
+             the unit the width has been specified in.
+             defaults to code units.  If width is a tuple this 
+             argument is ignored
 
         """
-        if iterable(width): 
-            if isinstance(width[1],str):
-                w, unit = width
-                width = (w, w)
-            elif isinstance(width[1], tuple):
-                wx,unitx = width[0]
-                wy,unity = width[1]
-                width = (wx/self.pf[unitx],wy/self.pf[unity])
+        if width is not None:
+            set_axes_unit = True
         else:
-            width = (width, width)
-        Wx, Wy = width
-        width = (Wx,Wy)
-        width = [w / self.pf[unit] for w in width]
+            set_axes_unit = False
+
+        width = StandardWidth(self._frb.axis, width, None, self.pf)
 
         centerx = (self.xlim[1] + self.xlim[0])/2.
         centery = (self.ylim[1] + self.ylim[0])/2. 
         
-        self.xlim = (centerx - width[0]/2.,
-                     centerx + width[0]/2.)
-        self.ylim = (centery - width[1]/2.,
-                     centery + width[1]/2.)
+        units = (width[0][1], width[1][1])
+
+        if set_axes_unit:
+            self._axes_unit_names = units
+        else:
+            self._axes_unit_names = None
+
+        self.xlim = (centerx - width[0][0]/self.pf[units[0]]/2.,
+                     centerx + width[0][0]/self.pf[units[0]]/2.)
+        self.ylim = (centery - width[1][0]/self.pf[units[1]]/2.,
+                     centery + width[1][0]/self.pf[units[1]]/2.)
         
         if hasattr(self,'zlim'):
             centerz = (self.zlim[1] + self.zlim[0])/2.
-            mw = max(width)
+            mw = max([width[0][0], width[1][0]])
             self.zlim = (centerz - mw/2.,
                          centerz + mw/2.)
-        
+
     @invalidate_data
     def set_center(self, new_center, unit = '1'):
         """Sets a new center for the plot window
@@ -527,7 +528,7 @@
     def __init__(self, *args,**kwargs):
         setup = kwargs.pop("setup", True)
         PlotWindow.__init__(self, *args,**kwargs)
-        self._unit = None
+        self._axes_unit_names = None
         self._callbacks = []
         self._field_transform = {}
         self._colormaps = defaultdict(lambda: 'algae')
@@ -654,12 +655,14 @@
 
         Parameters
         ----------
-        unit_name : string
+        unit_name : string or two element tuple of strings
             A unit, available for conversion in the parameter file, that the
             image extents will be displayed in.  If set to None, any previous
             units will be reset.  If the unit is None, the default is chosen.
             If unit_name is '1', 'u', or 'unitary', it will not display the 
-            units, and only show the axes name.
+            units, and only show the axes name. If unit_name is a tuple, the first
+            element is assumed to be the unit for the x axis and the second element
+            the unit for the y axis.
 
         Raises
         ------
@@ -677,12 +680,13 @@
         >>> p.show()
         """
         # blind except because it could be in conversion_factors or units
-        try:
-            self.pf[unit_name]
-        except KeyError: 
-            if unit_name is not None:
-                raise YTUnitNotRecognized(unit_name)
-        self._unit = unit_name
+        if unit_name is not None:
+            for un in unit_name:
+                try:
+                    self.pf[un]
+                except KeyError: 
+                    raise YTUnitNotRecognized(un)
+        self._axes_unit_names = unit_name
 
     def get_metadata(self, field, strip_mathml = True, return_string = True):
         fval = self._frb[field]
@@ -690,10 +694,11 @@
         ma = fval.max()
         x_width = self.xlim[1] - self.xlim[0]
         y_width = self.ylim[1] - self.ylim[0]
-        if self._unit is None:
+        if self._axes_unit_names is None:
             unit = get_smallest_appropriate_unit(x_width, self.pf)
+            unit = (unit, unit)
         else:
-            unit = self._unit
+            unit = self._axes_unit_names
         units = self.get_field_units(field, strip_mathml)
         center = getattr(self._frb.data_source, "center", None)
         if center is None or self._frb.axis == 4:
@@ -707,16 +712,16 @@
         if return_string:
             md = _metadata_template % dict(
                 pf = self.pf,
-                x_width = x_width*self.pf[unit],
-                y_width = y_width*self.pf[unit],
-                unit = unit, units = units, mi = mi, ma = ma,
-                xc = xc, yc = yc, zc = zc)
+                x_width = x_width*self.pf[unit[0]],
+                y_width = y_width*self.pf[unit[1]],
+                axes_unit_names = unit[0], colorbar_unit = units, 
+                mi = mi, ma = ma, xc = xc, yc = yc, zc = zc)
         else:
             md = dict(pf = self.pf,
-                      x_width = x_width*self.pf[unit],
-                      y_width = y_width*self.pf[unit],
-                      unit = unit, units = units, mi = mi, ma = ma,
-                      xc = xc, yc = yc, zc = zc)
+                      x_width = x_width*self.pf[unit[0]],
+                      y_width = y_width*self.pf[unit[1]],
+                      axes_unit_names = unit, colorbar_unit = units, 
+                      mi = mi, ma = ma, xc = xc, yc = yc, zc = zc)
         return md
 
     def get_field_units(self, field, strip_mathml = True):
@@ -745,9 +750,9 @@
     _plot_type = None
 
     def __init__(self, *args, **kwargs):
-        if self._frb_generator == None:
+        if self._frb_generator is None:
             self._frb_generator = kwargs.pop("frb_generator")
-        if self._plot_type == None:
+        if self._plot_type is None:
             self._plot_type = kwargs.pop("plot_type")
         PWViewer.__init__(self, *args, **kwargs)
 
@@ -776,42 +781,49 @@
                 raise RuntimeError(
                     'origin keyword: \"%(k)s\" not recognized' % {'k': self.origin})
 
-            extent = [self.xlim[i] - xc for i in (0,1)]
-            extent.extend([self.ylim[i] - yc for i in (0,1)])
-            extent = [el*self.pf[md['unit']] for el in extent]
+            (unit_x, unit_y) = md['axes_unit_names']
+
+            extentx = [(self.xlim[i] - xc) * self.pf[unit_x] for i in (0,1)]
+            extenty = [(self.ylim[i] - yc) * self.pf[unit_y] for i in (0,1)]
+
+            extent = extentx + extenty
 
             if f in self.plots.keys():
-                zlim = (self.plots[f].zmin,self.plots[f].zmax)
+                zlim = (self.plots[f].zmin, self.plots[f].zmax)
             else:
-                zlim = (None,None)
+                zlim = (None, None)
 
-            aspect = (self.xlim[1] - self.xlim[0])/(self.ylim[1]-self.ylim[0])
-
+            plot_aspect = (self.xlim[1] - self.xlim[0]) / (self.ylim[1] - self.ylim[0])
+            
             # This sets the size of the figure, and defaults to making one of the dimensions smaller.
             # This should protect against giant images in the case of a very large aspect ratio.
             norm_size = 10.0
             cbar_frac = 0.0
-            if aspect > 1.0:
-                size = (norm_size*(1.+cbar_frac), norm_size/aspect)
+            if plot_aspect > 1.0:
+                size = (norm_size*(1.+cbar_frac), norm_size/plot_aspect)
             else:
-                size = (aspect*norm_size*(1.+cbar_frac), norm_size)
+                size = (plot_aspect*norm_size*(1.+cbar_frac), norm_size)
 
-            self.plots[f] = WindowPlotMPL(self._frb[f], extent, self._field_transform[f], 
+            # Correct the aspect ratio in case unit_x and unit_y are different
+            aspect = self.pf[unit_x]/self.pf[unit_y]
+            
+            self.plots[f] = WindowPlotMPL(self._frb[f], extent, aspect, self._field_transform[f], 
                                           self._colormaps[f], size, zlim)
+
             self.plots[f].cb = self.plots[f].figure.colorbar(
                 self.plots[f].image, cax = self.plots[f].cax)
 
-            if not md['unit'] in ['1', 'u', 'unitary']:
-                axes_unit_label = '\/\/('+md['unit']+')'
+            axes_unit_labels = ['', '']
+            for i, un in enumerate((unit_x, unit_y)):
+                if un not in ['1', 'u', 'unitary']:
+                    axes_unit_labels[i] = '\/\/('+un+')'
+                    
+            if self.oblique:
+                labels = [r'$\rm{Image\/x'+axes_unit_labels[0]+'}$',
+                          r'$\rm{Image\/y'+axes_unit_labels[1]+'}$']
             else:
-                axes_unit_label = ''
-
-            if self.oblique == False:
                 labels = [r'$\rm{'+axis_labels[axis_index][i]+
-                        axes_unit_label + r'}$' for i in (0,1)]
-            else:
-                labels = [r'$\rm{Image\/x'+axes_unit_label+'}$',
-                          r'$\rm{Image\/y'+axes_unit_label+'}$']
+                          axes_unit_labels[i] + r'}$' for i in (0,1)]
 
             self.plots[f].axes.set_xlabel(labels[0],fontsize=self.fontsize)
             self.plots[f].axes.set_ylabel(labels[1],fontsize=self.fontsize)
@@ -831,14 +843,14 @@
             except ParseFatalException, err:
                 raise YTCannotParseFieldDisplayName(f,field_name,str(err))
 
-            if md['units'] == None or md['units'] == '':
+            if md['colorbar_unit'] is None or md['colorbar_unit'] == '':
                 label = field_name
             else:
                 try:
-                    parser.parse(r'$'+md['units']+r'$')
+                    parser.parse(r'$'+md['colorbar_unit']+r'$')
                 except ParseFatalException, err:
-                    raise YTCannotParseUnitDisplayName(f, md['units'],str(err))
-                label = field_name+r'$\/\/('+md['units']+r')$'
+                    raise YTCannotParseUnitDisplayName(f, md['colorbar_unit'],str(err))
+                label = field_name+r'$\/\/('+md['colorbar_unit']+r')$'
 
             self.plots[f].cb.set_label(label,fontsize=self.fontsize)
 
@@ -905,9 +917,9 @@
         """
         names = []
         if mpl_kwargs is None: mpl_kwargs = {}
-        if name == None:
+        if name is None:
             name = str(self.pf)
-        suffix = os.path.splitext(name)[1]
+        suffix = get_image_suffix(name)
         if suffix != '':
             for k, v in self.plots.iteritems():
                 names.append(v.save(name,mpl_kwargs))
@@ -1012,13 +1024,14 @@
              wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window 
              that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along 
              the y axis.  In the other two examples, code units are assumed, for example
-             (0.2, 0.3) requests a plot that has and x width of 0.2 and a y width of 0.3 
-             in code units.  
+             (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3 
+             in code units.  If units are provided the resulting plot axis labels will
+             use the supplied units.
         axes_unit : A string
-            The name of the unit for the tick labels on the x and y axes.  
-            Defaults to None, which automatically picks an appropriate unit.
-            If axes_unit is '1', 'u', or 'unitary', it will not display the 
-            units, and only show the axes name.
+             The name of the unit for the tick labels on the x and y axes.  
+             Defaults to None, which automatically picks an appropriate unit.
+             If axes_unit is '1', 'u', or 'unitary', it will not display the 
+             units, and only show the axes name.
         origin : string
              The location of the origin of the plot coordinate system.
              Currently, can be set to three options: 'left-domain', corresponding
@@ -1038,12 +1051,14 @@
         >>> p.save('sliceplot')
         
         """
-        # tHis will handle time series data and controllers
+        # this will handle time series data and controllers
         ts = self._initialize_dataset(pf) 
         self.ts = ts
         pf = self.pf = ts[0]
         axis = fix_axis(axis)
-        (bounds, center) = GetBoundsAndCenter(axis, center, width, pf)
+        (bounds, center, units) = GetWindowParameters(axis, center, width, pf)
+        if axes_unit is None and units != ('1', '1'):
+            axes_unit = units
         slc = pf.h.slice(axis, center[axis], fields=fields)
         PWViewerMPL.__init__(self, slc, bounds, origin=origin)
         self.set_axes_unit(axes_unit)
@@ -1096,23 +1111,24 @@
              wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window 
              that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along 
              the y axis.  In the other two examples, code units are assumed, for example
-             (0.2, 0.3) requests a plot that has and x width of 0.2 and a y width of 0.3 
-             in code units.
+             (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3 
+             in code units.  If units are provided the resulting plot axis labels will 
+             use the supplied units.
         axes_unit : A string
-            The name of the unit for the tick labels on the x and y axes.  
-            Defaults to None, which automatically picks an appropriate unit.
-            If axes_unit is '1', 'u', or 'unitary', it will not display the 
-            units, and only show the axes name.
+             The name of the unit for the tick labels on the x and y axes.  
+             Defaults to None, which automatically picks an appropriate unit.
+             If axes_unit is '1', 'u', or 'unitary', it will not display the 
+             units, and only show the axes name.
         origin : A string
-            The location of the origin of the plot coordinate system.
-            Currently, can be set to three options: 'left-domain', corresponding
-            to the bottom-left hand corner of the simulation domain, 'center-domain',
-            corresponding the center of the simulation domain, or 'center-window' for 
-            the center of the plot window.
+             The location of the origin of the plot coordinate system.
+             Currently, can be set to three options: 'left-domain', corresponding
+             to the bottom-left hand corner of the simulation domain, 'center-domain',
+             corresponding the center of the simulation domain, or 'center-window' for 
+             the center of the plot window.
         weight_field : string
-            The name of the weighting field.  Set to None for no weight.
+             The name of the weighting field.  Set to None for no weight.
         max_level: int
-            The maximum level to project to.
+             The maximum level to project to.
         fontsize : integer
              The size of the fonts for the axis, colorbar, and tick labels.
         
@@ -1130,7 +1146,9 @@
         self.ts = ts
         pf = self.pf = ts[0]
         axis = fix_axis(axis)
-        (bounds, center) = GetBoundsAndCenter(axis, center, width, pf)
+        (bounds, center, units) = GetWindowParameters(axis, center, width, pf)
+        if axes_unit is None  and units != ('1', '1'):
+            axes_unit = units
         proj = pf.h.proj(axis,fields,weight_field=weight_field,max_level=max_level,center=center)
         PWViewerMPL.__init__(self,proj,bounds,origin=origin)
         self.set_axes_unit(axes_unit)
@@ -1139,7 +1157,7 @@
     _plot_type = 'OffAxisSlice'
     _frb_generator = ObliqueFixedResolutionBuffer
 
-    def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
+    def __init__(self, pf, normal, fields, center='c', width=None, 
                  axes_unit=None, north_vector=None, fontsize=15):
         r"""Creates an off axis slice plot from a parameter file
 
@@ -1180,8 +1198,10 @@
         fontsize : integer
              The size of the fonts for the axis, colorbar, and tick labels.
         """
-        (bounds,center_rot) = GetOffAxisBoundsAndCenter(normal,center,width,pf)
-        cutting = pf.h.cutting(normal,center,fields=fields,north_vector=north_vector)
+        (bounds, center_rot, units) = GetObliqueWindowParameters(normal,center,width,pf)
+        if axes_unit is None and units != ('1', '1'):
+            axes_unit = units
+        cutting = pf.h.cutting(normal, center, fields=fields, north_vector=north_vector)
         # Hard-coding the origin keyword since the other two options
         # aren't well-defined for off-axis data objects
         PWViewerMPL.__init__(self,cutting,bounds,origin='center-window',periodic=False,oblique=True)
@@ -1214,8 +1234,8 @@
     _plot_type = 'OffAxisProjection'
     _frb_generator = OffAxisProjectionFixedResolutionBuffer
 
-    def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
-                 depth=(1,'unitary'), axes_unit=None, weight_field=None, 
+    def __init__(self, pf, normal, fields, center='c', width=None, 
+                 depth=(1, '1'), axes_unit=None, weight_field=None, 
                  max_level=None, north_vector=None, volume=None, no_ghost=False, 
                  le=None, re=None, interpolated=False, fontsize=15):
         r"""Creates an off axis projection plot from a parameter file
@@ -1264,8 +1284,9 @@
             set, an arbitrary grid-aligned north-vector is chosen.
 
         """
-        (bounds,center_rot) = GetOffAxisBoundsAndCenter(normal,center,width,pf,depth=depth)
-        # Hard-coding the resolution for now
+        (bounds, center_rot, units) = GetObliqueWindowParameters(normal,center,width,pf,depth=depth)
+        if axes_unit is None and units != ('1', '1', '1'):
+            axes_unit = units[:2]
         fields = ensure_list(fields)[:]
         width = np.array((bounds[1] - bounds[0], bounds[3] - bounds[2], bounds[5] - bounds[4]))
         OffAxisProj = OffAxisProjectionDummyDataSource(center_rot, pf, normal, width, fields, interpolated,
@@ -1279,9 +1300,9 @@
 _metadata_template = """
 %(pf)s<br><br>
-Field of View:  %(x_width)0.3f %(unit)s<br>
-Minimum Value:  %(mi)0.3e %(units)s<br>
-Maximum Value:  %(ma)0.3e %(units)s<br>
+Field of View:  %(x_width)0.3f %(axes_unit_names)s<br>
+Minimum Value:  %(mi)0.3e %(colorbar_unit)s<br>
+Maximum Value:  %(ma)0.3e %(colorbar_unit)s<br>
 Central Point:  (data coords)<br>
    %(xc)0.14f<br>
    %(yc)0.14f<br>
@@ -1473,7 +1494,7 @@
             self.cax = self.figure.add_axes(caxrect)
             
     def save(self, name, mpl_kwargs, canvas = None):
-        suffix = os.path.splitext(name)[1]
+        suffix = get_image_suffix(name)
         
         if suffix == '':
             suffix = '.png'
@@ -1539,17 +1560,17 @@
         return f.read()
 
 class WindowPlotMPL(PlotMPL):
-    def __init__(self, data, extent, field_transform, cmap, size, zlim):
+    def __init__(self, data, extent, aspect, field_transform, cmap, size, zlim):
         self.zmin, self.zmax = zlim
         PlotMPL.__init__(self, data, size)
-        self.__init_image(data, extent, field_transform, cmap)
+        self.__init_image(data, extent, aspect, field_transform, cmap)
 
-    def __init_image(self, data, extent, field_transform, cmap):
+    def __init_image(self, data, extent, aspect, field_transform, cmap):
         if (field_transform.name == 'log10'):
             norm = matplotlib.colors.LogNorm()
         elif (field_transform.name == 'linear'):
             norm = matplotlib.colors.Normalize()
-        self.image = self.axes.imshow(data, origin='lower', extent = extent,
-                                      norm = norm, vmin = self.zmin, 
-                                      vmax = self.zmax, cmap = cmap)
-        self.image.axes.ticklabel_format(scilimits=(-4,3))
+        self.image = self.axes.imshow(data, origin='lower', extent=extent,
+                                      norm=norm, vmin=self.zmin, aspect=aspect, 
+                                      vmax=self.zmax, cmap=cmap)
+        self.image.axes.ticklabel_format(scilimits=(-2,3))
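
For context: the WindowPlotMPL changes above thread a new aspect argument through to matplotlib's imshow and pick a norm from the field transform. A minimal standalone sketch of that pattern, with toy data and an assumed colormap and backend:

    import numpy as np
    import matplotlib
    matplotlib.use("Agg")              # headless backend, assumed for the sketch
    import matplotlib.pyplot as plt
    from matplotlib.colors import LogNorm, Normalize

    data = np.random.random((64, 64)) + 1e-3
    take_log = True
    norm = LogNorm() if take_log else Normalize()
    axes = plt.figure().add_subplot(111)
    image = axes.imshow(data, origin='lower', extent=[0, 1, 0, 1],
                        norm=norm, aspect='auto', cmap='jet')
    image.axes.ticklabel_format(scilimits=(-2, 3))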



https://bitbucket.org/yt_analysis/yt-3.0/changeset/669c7e9b9921/
changeset:   669c7e9b9921
branch:      yt
user:        chummels
date:        2012-11-16 04:13:28
summary:     Modifying a few flags in the testing suite to be more descriptive.
affected #:  1 file

diff -r 3d6b030a34ad4bc6caf9049ccf29e40cba1a9e4f -r 669c7e9b992172390eeb1ac92075086472b9cc85 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -53,12 +53,12 @@
 
     def options(self, parser, env=os.environ):
         super(AnswerTesting, self).options(parser, env=env)
-        parser.add_option("--answer-compare", dest="compare_name",
+        parser.add_option("--answer-compare-name", dest="compare_name",
             default=_latest, help="The name against which we will compare")
         parser.add_option("--answer-big-data", dest="big_data",
             default=False, help="Should we run against big data, too?",
             action="store_true")
-        parser.add_option("--answer-name", dest="this_name",
+        parser.add_option("--answer-store-name", dest="this_name",
             default=None,
             help="The name we'll call this set of tests")
         parser.add_option("--answer-store", dest="store_results",



https://bitbucket.org/yt_analysis/yt-3.0/changeset/da3fcd605bcc/
changeset:   da3fcd605bcc
branch:      yt
user:        chummels
date:        2012-11-16 04:13:47
summary:     Merging.
affected #:  1 file

diff -r 669c7e9b992172390eeb1ac92075086472b9cc85 -r da3fcd605bcc411ff32fa2f9fc22ef2bc4c2658b yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -266,9 +266,9 @@
 def GetMagRescalingFactor(pf):
     if pf['unitsystem'].lower() == "cgs":
          factor = 1
-    if pf['unitsystem'].lower() == "si":
+    elif pf['unitsystem'].lower() == "si":
          factor = np.sqrt(4*np.pi/1e7)
-    if pf['unitsystem'].lower() == "none":
+    elif pf['unitsystem'].lower() == "none":
          factor = np.sqrt(4*np.pi)
     else:
         raise RuntimeError("Runtime parameter unitsystem with"



https://bitbucket.org/yt_analysis/yt-3.0/changeset/aa9713fe0551/
changeset:   aa9713fe0551
branch:      yt
user:        brittonsmith
date:        2012-11-16 04:26:29
summary:     Making sure compare_name for answer testing is set to None if
--answer-store has been selected.
affected #:  1 file

diff -r da3fcd605bcc411ff32fa2f9fc22ef2bc4c2658b -r aa9713fe0551cf5855055c1bec43cf6e1d850426 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -93,7 +93,9 @@
             options.compare_name = None
         elif options.compare_name == "latest":
             options.compare_name = _latest
-
+        if options.store_results:
+            options.compare_name = None
+            
         # Local/Cloud storage 
         if options.store_local_results:
             storage_class = AnswerTestLocalStorage



https://bitbucket.org/yt_analysis/yt-3.0/changeset/f95c33973f14/
changeset:   f95c33973f14
branch:      yt
user:        jzuhone
date:        2012-11-16 04:32:48
summary:     Fixing the domain dimensions when dimensionality < 3
affected #:  2 files

diff -r ecb651f99c6e02c03676ed2bcc79fa25fab76f2a -r f95c33973f14369d3bdbe6f2d3dbd7b7c6f10228 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -331,6 +331,8 @@
             dimensionality = 2
         if grid['dimensions'][1] == 1 :
             dimensionality = 1
+        if dimensionality <= 2 : self.domain_dimensions[2] = 1.
+        if dimensionality == 1 : self.domain_dimensions[1] = 1.
         self.dimensionality = dimensionality
         self.current_time = grid["time"]
         self.unique_identifier = self._handle.__hash__()


diff -r ecb651f99c6e02c03676ed2bcc79fa25fab76f2a -r f95c33973f14369d3bdbe6f2d3dbd7b7c6f10228 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -418,7 +418,11 @@
             nblockx = self.parameters["iprocs"]
             nblocky = self.parameters["jprocs"]
             nblockz = self.parameters["kprocs"]
-                        
+
+        # In case the user wasn't careful
+        if dimensionality <= 2 : nblockz = 1
+        if dimensionality == 1 : nblocky = 1
+
         self.dimensionality = dimensionality
         self.domain_dimensions = \
             np.array([nblockx*nxb,nblocky*nyb,nblockz*nzb])
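
Note: both hunks above collapse the unused axes to a single zone when the data are 1D or 2D. A sketch of the same collapse on toy values:

    import numpy as np

    domain_dimensions = np.array([64, 64, 64])
    grid_dims = np.array([65, 65, 1])   # a 2D grid: one zone thick in z

    dimensionality = 3
    if grid_dims[2] == 1:
        dimensionality = 2
    if grid_dims[1] == 1:
        dimensionality = 1
    if dimensionality <= 2: domain_dimensions[2] = 1
    if dimensionality == 1: domain_dimensions[1] = 1
    # domain_dimensions is now [64, 64, 1]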



https://bitbucket.org/yt_analysis/yt-3.0/changeset/aaa2e27c7cdf/
changeset:   aaa2e27c7cdf
branch:      yt
user:        jzuhone
date:        2012-11-16 04:33:53
summary:     Merging
affected #:  1 file

diff -r f95c33973f14369d3bdbe6f2d3dbd7b7c6f10228 -r aaa2e27c7cdf9b5a0a33c5a34ea730ce2ed3d408 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -53,12 +53,12 @@
 
     def options(self, parser, env=os.environ):
         super(AnswerTesting, self).options(parser, env=env)
-        parser.add_option("--answer-compare", dest="compare_name",
+        parser.add_option("--answer-compare-name", dest="compare_name",
             default=_latest, help="The name against which we will compare")
         parser.add_option("--answer-big-data", dest="big_data",
             default=False, help="Should we run against big data, too?",
             action="store_true")
-        parser.add_option("--answer-name", dest="this_name",
+        parser.add_option("--answer-store-name", dest="this_name",
             default=None,
             help="The name we'll call this set of tests")
         parser.add_option("--answer-store", dest="store_results",
@@ -93,7 +93,9 @@
             options.compare_name = None
         elif options.compare_name == "latest":
             options.compare_name = _latest
-
+        if options.store_results:
+            options.compare_name = None
+            
         # Local/Cloud storage 
         if options.store_local_results:
             storage_class = AnswerTestLocalStorage



https://bitbucket.org/yt_analysis/yt-3.0/changeset/f58eae8df605/
changeset:   f58eae8df605
branch:      yt
user:        chummels
date:        2012-11-16 04:35:03
summary:     Updating the metavars on the help messages for the testing suite to reflect the variable types expected at the command line call.
affected #:  1 file

diff -r da3fcd605bcc411ff32fa2f9fc22ef2bc4c2658b -r f58eae8df6053e0e3c1aea1498c7177d5631ce9f yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -53,12 +53,12 @@
 
     def options(self, parser, env=os.environ):
         super(AnswerTesting, self).options(parser, env=env)
-        parser.add_option("--answer-compare-name", dest="compare_name",
+        parser.add_option("--answer-compare-name", dest="compare_name", metavar='str',
             default=_latest, help="The name against which we will compare")
         parser.add_option("--answer-big-data", dest="big_data",
             default=False, help="Should we run against big data, too?",
             action="store_true")
-        parser.add_option("--answer-store-name", dest="this_name",
+        parser.add_option("--answer-store-name", dest="this_name", metavar='str',
             default=None,
             help="The name we'll call this set of tests")
         parser.add_option("--answer-store", dest="store_results",



https://bitbucket.org/yt_analysis/yt-3.0/changeset/b22a96f4172f/
changeset:   b22a96f4172f
branch:      yt
user:        chummels
date:        2012-11-16 04:35:16
summary:     Merging.
affected #:  3 files

diff -r f58eae8df6053e0e3c1aea1498c7177d5631ce9f -r b22a96f4172f87443ef19bc060b2725c8478cf4d yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -331,6 +331,8 @@
             dimensionality = 2
         if grid['dimensions'][1] == 1 :
             dimensionality = 1
+        if dimensionality <= 2 : self.domain_dimensions[2] = 1.
+        if dimensionality == 1 : self.domain_dimensions[1] = 1.
         self.dimensionality = dimensionality
         self.current_time = grid["time"]
         self.unique_identifier = self._handle.__hash__()


diff -r f58eae8df6053e0e3c1aea1498c7177d5631ce9f -r b22a96f4172f87443ef19bc060b2725c8478cf4d yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -418,7 +418,11 @@
             nblockx = self.parameters["iprocs"]
             nblocky = self.parameters["jprocs"]
             nblockz = self.parameters["kprocs"]
-                        
+
+        # In case the user wasn't careful
+        if dimensionality <= 2 : nblockz = 1
+        if dimensionality == 1 : nblocky = 1
+
         self.dimensionality = dimensionality
         self.domain_dimensions = \
             np.array([nblockx*nxb,nblocky*nyb,nblockz*nzb])


diff -r f58eae8df6053e0e3c1aea1498c7177d5631ce9f -r b22a96f4172f87443ef19bc060b2725c8478cf4d yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -93,7 +93,9 @@
             options.compare_name = None
         elif options.compare_name == "latest":
             options.compare_name = _latest
-
+        if options.store_results:
+            options.compare_name = None
+            
         # Local/Cloud storage 
         if options.store_local_results:
             storage_class = AnswerTestLocalStorage



https://bitbucket.org/yt_analysis/yt-3.0/changeset/01f461871682/
changeset:   01f461871682
branch:      yt
user:        ngoldbaum
date:        2012-11-16 04:38:49
summary:     Fixing a typo in the text of an error in the FLASH frontend.
affected #:  1 file

diff -r c57187ca13b69862a02b80af7b5b3eb83698a4c6 -r 01f46187168280aef5dd9afa5ae227ca3f08fe8c yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -271,7 +271,7 @@
     if pf['unitsystem'].lower() == "none":
          factor = np.sqrt(4*np.pi)
     else:
-        raise RuntimeError("Runtime parameter unitsystem with"
+        raise RuntimeError("Runtime parameter unitsystem with "
                            "value %s is unrecognized" % pf['unitsystem'])
     return factor
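
Note on the one-character fix above: adjacent string literals concatenate verbatim at compile time, so without the trailing space the message read "...unitsystem withvalue...". A toy demonstration:

    # Buggy: the literals join with no separator.
    msg = ("Runtime parameter unitsystem with"
           "value %s is unrecognized" % "si")
    # -> 'Runtime parameter unitsystem withvalue si is unrecognized'

    # Fixed: the trailing space restores the word break.
    msg = ("Runtime parameter unitsystem with "
           "value %s is unrecognized" % "si")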
 



https://bitbucket.org/yt_analysis/yt-3.0/changeset/d77c88afbd38/
changeset:   d77c88afbd38
branch:      yt
user:        ngoldbaum
date:        2012-11-16 04:39:10
summary:     Merging.
affected #:  4 files

diff -r 01f46187168280aef5dd9afa5ae227ca3f08fe8c -r d77c88afbd38f49246ced0c09d2abfc7973e5ce5 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -331,6 +331,8 @@
             dimensionality = 2
         if grid['dimensions'][1] == 1 :
             dimensionality = 1
+        if dimensionality <= 2 : self.domain_dimensions[2] = 1.
+        if dimensionality == 1 : self.domain_dimensions[1] = 1.
         self.dimensionality = dimensionality
         self.current_time = grid["time"]
         self.unique_identifier = self._handle.__hash__()


diff -r 01f46187168280aef5dd9afa5ae227ca3f08fe8c -r d77c88afbd38f49246ced0c09d2abfc7973e5ce5 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -418,7 +418,11 @@
             nblockx = self.parameters["iprocs"]
             nblocky = self.parameters["jprocs"]
             nblockz = self.parameters["kprocs"]
-                        
+
+        # In case the user wasn't careful
+        if dimensionality <= 2 : nblockz = 1
+        if dimensionality == 1 : nblocky = 1
+
         self.dimensionality = dimensionality
         self.domain_dimensions = \
             np.array([nblockx*nxb,nblocky*nyb,nblockz*nzb])


diff -r 01f46187168280aef5dd9afa5ae227ca3f08fe8c -r d77c88afbd38f49246ced0c09d2abfc7973e5ce5 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -266,9 +266,9 @@
 def GetMagRescalingFactor(pf):
     if pf['unitsystem'].lower() == "cgs":
          factor = 1
-    if pf['unitsystem'].lower() == "si":
+    elif pf['unitsystem'].lower() == "si":
          factor = np.sqrt(4*np.pi/1e7)
-    if pf['unitsystem'].lower() == "none":
+    elif pf['unitsystem'].lower() == "none":
          factor = np.sqrt(4*np.pi)
     else:
         raise RuntimeError("Runtime parameter unitsystem with "


diff -r 01f46187168280aef5dd9afa5ae227ca3f08fe8c -r d77c88afbd38f49246ced0c09d2abfc7973e5ce5 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -53,12 +53,12 @@
 
     def options(self, parser, env=os.environ):
         super(AnswerTesting, self).options(parser, env=env)
-        parser.add_option("--answer-compare", dest="compare_name",
+        parser.add_option("--answer-compare-name", dest="compare_name", metavar='str',
             default=_latest, help="The name against which we will compare")
         parser.add_option("--answer-big-data", dest="big_data",
             default=False, help="Should we run against big data, too?",
             action="store_true")
-        parser.add_option("--answer-name", dest="this_name",
+        parser.add_option("--answer-store-name", dest="this_name", metavar='str',
             default=None,
             help="The name we'll call this set of tests")
         parser.add_option("--answer-store", dest="store_results",
@@ -93,7 +93,9 @@
             options.compare_name = None
         elif options.compare_name == "latest":
             options.compare_name = _latest
-
+        if options.store_results:
+            options.compare_name = None
+            
         # Local/Cloud storage 
         if options.store_local_results:
             storage_class = AnswerTestLocalStorage
@@ -216,9 +218,9 @@
 
 def data_dir_load(pf_fn):
     path = ytcfg.get("yt", "test_data_dir")
+    if isinstance(pf_fn, StaticOutput): return pf_fn
     if not os.path.isdir(path):
         return False
-    if isinstance(pf_fn, StaticOutput): return pf_fn
     with temp_cwd(path):
         pf = load(pf_fn)
         pf.h



https://bitbucket.org/yt_analysis/yt-3.0/changeset/cf7b6d987fa1/
changeset:   cf7b6d987fa1
branch:      yt
user:        chummels
date:        2012-11-16 05:44:33
summary:     Removing the --answer-store flag from the testing interface, since setting --answer-store-name automatically sets this bool to true.  Also made the tester die when one tries to store one dataset and compare against another.
affected #:  1 file

diff -r b22a96f4172f87443ef19bc060b2725c8478cf4d -r cf7b6d987fa13bfbf96fa0952d1c37a753aa27f4 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -29,6 +29,7 @@
 import contextlib
 import urllib2
 import cPickle
+import sys
 
 from nose.plugins import Plugin
 from yt.testing import *
@@ -54,15 +55,13 @@
     def options(self, parser, env=os.environ):
         super(AnswerTesting, self).options(parser, env=env)
         parser.add_option("--answer-compare-name", dest="compare_name", metavar='str',
-            default=_latest, help="The name against which we will compare")
+            default=_latest, help="The name of tests against which we will compare")
         parser.add_option("--answer-big-data", dest="big_data",
             default=False, help="Should we run against big data, too?",
             action="store_true")
-        parser.add_option("--answer-store-name", dest="this_name", metavar='str',
+        parser.add_option("--answer-store-name", dest="store_name", metavar='str',
             default=None,
             help="The name we'll call this set of tests")
-        parser.add_option("--answer-store", dest="store_results",
-            default=False, action="store_true")
         parser.add_option("--local-store", dest="store_local_results",
             default=False, action="store_true", help="Store/Load local results?")
 
@@ -83,8 +82,15 @@
         if not self.enabled:
             return
         disable_stream_logging()
-        if options.this_name is None: 
-            options.this_name = self.my_version
+        if options.store_name is not None:
+            self.store_results = True
+        # Making sure the user isn't trying to store and compare simultaneously
+            if options.compare_name is not None: 
+                sys.exit("You cannot store and compare simultaneously.")
+            options.compare_name = None
+        else: 
+            self.store_results = False
+            options.store_name = self.my_version
         from yt.config import ytcfg
         ytcfg["yt","__withintesting"] = "True"
         AnswerTestingTest.result_storage = \
@@ -93,8 +99,6 @@
             options.compare_name = None
         elif options.compare_name == "latest":
             options.compare_name = _latest
-        if options.store_results:
-            options.compare_name = None
             
         # Local/Cloud storage 
         if options.store_local_results:
@@ -104,20 +108,20 @@
                 options.compare_name = "%s/%s/%s" % \
                     (os.path.realpath(options.output_dir), options.compare_name, 
                      options.compare_name)
-            if options.this_name is not None:
+            if options.store_name is not None:
                 name_dir_path = "%s/%s" % \
                     (os.path.realpath(options.output_dir), 
-                    options.this_name)
+                    options.store_name)
                 if not os.path.isdir(name_dir_path):
                     os.mkdir(name_dir_path)
-                options.this_name= "%s/%s" % \
-                        (name_dir_path, options.this_name)
+                options.store_name= "%s/%s" % \
+                        (name_dir_path, options.store_name)
         else:
             storage_class = AnswerTestCloudStorage
 
         # Initialize answer/reference storage
         AnswerTestingTest.reference_storage = self.storage = \
-                storage_class(options.compare_name, options.this_name)
+                storage_class(options.compare_name, options.store_name)
 
         self.store_results = options.store_results
         self.store_local_results = options.store_local_results



https://bitbucket.org/yt_analysis/yt-3.0/changeset/9c43f0134dd1/
changeset:   9c43f0134dd1
branch:      yt
user:        chummels
date:        2012-11-16 05:44:44
summary:     Merging.
affected #:  1 file

diff -r cf7b6d987fa13bfbf96fa0952d1c37a753aa27f4 -r 9c43f0134dd11071e0e30f846e8f001e9a50ab86 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -271,7 +271,7 @@
     elif pf['unitsystem'].lower() == "none":
          factor = np.sqrt(4*np.pi)
     else:
-        raise RuntimeError("Runtime parameter unitsystem with"
+        raise RuntimeError("Runtime parameter unitsystem with "
                            "value %s is unrecognized" % pf['unitsystem'])
     return factor
 



https://bitbucket.org/yt_analysis/yt-3.0/changeset/914bda39f0e1/
changeset:   914bda39f0e1
branch:      yt
user:        chummels
date:        2012-11-16 05:51:41
summary:     Fixing a bug I just introduced.
affected #:  1 file

diff -r 9c43f0134dd11071e0e30f846e8f001e9a50ab86 -r 914bda39f0e12f856e23a915003cbdc3637ed67d yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -84,9 +84,8 @@
         disable_stream_logging()
         if options.store_name is not None:
             self.store_results = True
-        # Making sure the user isn't trying to store and compare simultaneously
-            if options.compare_name is not None: 
-                sys.exit("You cannot store and compare simultaneously.")
+        # If the user sets the store_name, then it means they are storing and
+        # not comparing, even if they set the compare_name (since it is set by default)
             options.compare_name = None
         else: 
             self.store_results = False
@@ -123,7 +122,6 @@
         AnswerTestingTest.reference_storage = self.storage = \
                 storage_class(options.compare_name, options.store_name)
 
-        self.store_results = options.store_results
         self.store_local_results = options.store_local_results
         global run_big_data
         run_big_data = options.big_data
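
Note: with the sys.exit removed, supplying --answer-store-name now implies storing and silently drops the compare name, which is populated by default. A condensed sketch of that resolution, with assumed names:

    def resolve(store_name, compare_name, my_version):
        if store_name is not None:
            # Storing wins: the default compare_name is discarded.
            return store_name, None, True    # name, compare, store_results
        return my_version, compare_name, False

    # resolve("gold001", "latest", "yt-2.5dev") -> ("gold001", None, True)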



https://bitbucket.org/yt_analysis/yt-3.0/changeset/8b9f0a2ec31a/
changeset:   8b9f0a2ec31a
branch:      yt
user:        jzuhone
date:        2012-11-17 13:15:41
summary:     A bug fix for when there is only one FLASH grid.
affected #:  2 files

diff -r c57187ca13b69862a02b80af7b5b3eb83698a4c6 -r 8b9f0a2ec31a3c1e56ffb25f79a9713688597624 setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,9 +1,3 @@
 [egg_info]
 #tag_build = .dev
 #tag_svn_revision = 1
-
-[nosetests]
-detailed-errors=1
-where=yt
-exclude=answer_testing
-with-xunit=1


diff -r c57187ca13b69862a02b80af7b5b3eb83698a4c6 -r 8b9f0a2ec31a3c1e56ffb25f79a9713688597624 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -126,8 +126,11 @@
         except KeyError:
             self.grid_particle_count[:] = 0.0
         self._particle_indices = np.zeros(self.num_grids + 1, dtype='int64')
-        np.add.accumulate(self.grid_particle_count.squeeze(),
-                          out=self._particle_indices[1:])
+        if self.num_grids > 1 :
+            np.add.accumulate(self.grid_particle_count.squeeze(),
+                              out=self._particle_indices[1:])
+        else :
+            self._particle_indices[1] = self.grid_particle_count.squeeze()
         # This will become redundant, as _prepare_grid will reset it to its
         # current value.  Note that FLASH uses 1-based indexing for refinement
         # levels, but we do not, so we reduce the level by 1.
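
Note on the single-grid branch above: with one grid, grid_particle_count has shape (1, 1), and squeeze() collapses it to a 0-d array that np.add.accumulate cannot accumulate into the length-1 output slice. A toy reproduction (shapes assumed from the surrounding code):

    import numpy as np

    counts = np.array([[7]])                 # grid_particle_count, one grid
    indices = np.zeros(2, dtype='int64')     # _particle_indices
    # np.add.accumulate(counts.squeeze(), out=indices[1:])  # fails on 0-d input
    indices[1] = counts.squeeze()            # the scalar assignment used instead
    # indices is now [0, 7]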



https://bitbucket.org/yt_analysis/yt-3.0/changeset/cd2e4c940137/
changeset:   cd2e4c940137
branch:      yt
user:        jzuhone
date:        2012-11-17 13:16:07
summary:     Merging
affected #:  4 files

diff -r 8b9f0a2ec31a3c1e56ffb25f79a9713688597624 -r cd2e4c940137c56458d7a0f0ab2e8394207da288 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -331,6 +331,8 @@
             dimensionality = 2
         if grid['dimensions'][1] == 1 :
             dimensionality = 1
+        if dimensionality <= 2 : self.domain_dimensions[2] = 1.
+        if dimensionality == 1 : self.domain_dimensions[1] = 1.
         self.dimensionality = dimensionality
         self.current_time = grid["time"]
         self.unique_identifier = self._handle.__hash__()


diff -r 8b9f0a2ec31a3c1e56ffb25f79a9713688597624 -r cd2e4c940137c56458d7a0f0ab2e8394207da288 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -421,7 +421,11 @@
             nblockx = self.parameters["iprocs"]
             nblocky = self.parameters["jprocs"]
             nblockz = self.parameters["kprocs"]
-                        
+
+        # In case the user wasn't careful
+        if dimensionality <= 2 : nblockz = 1
+        if dimensionality == 1 : nblocky = 1
+
         self.dimensionality = dimensionality
         self.domain_dimensions = \
             np.array([nblockx*nxb,nblocky*nyb,nblockz*nzb])


diff -r 8b9f0a2ec31a3c1e56ffb25f79a9713688597624 -r cd2e4c940137c56458d7a0f0ab2e8394207da288 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -266,9 +266,9 @@
 def GetMagRescalingFactor(pf):
     if pf['unitsystem'].lower() == "cgs":
          factor = 1
-    if pf['unitsystem'].lower() == "si":
+    elif pf['unitsystem'].lower() == "si":
          factor = np.sqrt(4*np.pi/1e7)
-    if pf['unitsystem'].lower() == "none":
+    elif pf['unitsystem'].lower() == "none":
          factor = np.sqrt(4*np.pi)
     else:
         raise RuntimeError("Runtime parameter unitsystem with"


diff -r 8b9f0a2ec31a3c1e56ffb25f79a9713688597624 -r cd2e4c940137c56458d7a0f0ab2e8394207da288 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -216,9 +216,9 @@
 
 def data_dir_load(pf_fn):
     path = ytcfg.get("yt", "test_data_dir")
+    if isinstance(pf_fn, StaticOutput): return pf_fn
     if not os.path.isdir(path):
         return False
-    if isinstance(pf_fn, StaticOutput): return pf_fn
     with temp_cwd(path):
         pf = load(pf_fn)
         pf.h



https://bitbucket.org/yt_analysis/yt-3.0/changeset/d491ba54ddbd/
changeset:   d491ba54ddbd
branch:      yt
user:        jzuhone
date:        2012-11-17 13:16:41
summary:     Merging
affected #:  2 files

diff -r cd2e4c940137c56458d7a0f0ab2e8394207da288 -r d491ba54ddbdf07b04e5733db2e1e6396162a11e yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -271,7 +271,7 @@
     elif pf['unitsystem'].lower() == "none":
          factor = np.sqrt(4*np.pi)
     else:
-        raise RuntimeError("Runtime parameter unitsystem with"
+        raise RuntimeError("Runtime parameter unitsystem with "
                            "value %s is unrecognized" % pf['unitsystem'])
     return factor
 


diff -r cd2e4c940137c56458d7a0f0ab2e8394207da288 -r d491ba54ddbdf07b04e5733db2e1e6396162a11e yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -29,6 +29,7 @@
 import contextlib
 import urllib2
 import cPickle
+import sys
 
 from nose.plugins import Plugin
 from yt.testing import *
@@ -53,16 +54,14 @@
 
     def options(self, parser, env=os.environ):
         super(AnswerTesting, self).options(parser, env=env)
-        parser.add_option("--answer-compare", dest="compare_name",
-            default=_latest, help="The name against which we will compare")
+        parser.add_option("--answer-compare-name", dest="compare_name", metavar='str',
+            default=_latest, help="The name of tests against which we will compare")
         parser.add_option("--answer-big-data", dest="big_data",
             default=False, help="Should we run against big data, too?",
             action="store_true")
-        parser.add_option("--answer-name", dest="this_name",
+        parser.add_option("--answer-store-name", dest="store_name", metavar='str',
             default=None,
             help="The name we'll call this set of tests")
-        parser.add_option("--answer-store", dest="store_results",
-            default=False, action="store_true")
         parser.add_option("--local-store", dest="store_local_results",
             default=False, action="store_true", help="Store/Load local results?")
 
@@ -83,8 +82,14 @@
         if not self.enabled:
             return
         disable_stream_logging()
-        if options.this_name is None: 
-            options.this_name = self.my_version
+        if options.store_name is not None:
+            self.store_results = True
+        # If the user sets the store_name, then it means they are storing and
+        # not comparing, even if they set the compare_name (since it is set by default)
+            options.compare_name = None
+        else: 
+            self.store_results = False
+            options.store_name = self.my_version
         from yt.config import ytcfg
         ytcfg["yt","__withintesting"] = "True"
         AnswerTestingTest.result_storage = \
@@ -93,7 +98,7 @@
             options.compare_name = None
         elif options.compare_name == "latest":
             options.compare_name = _latest
-
+            
         # Local/Cloud storage 
         if options.store_local_results:
             storage_class = AnswerTestLocalStorage
@@ -102,22 +107,21 @@
                 options.compare_name = "%s/%s/%s" % \
                     (os.path.realpath(options.output_dir), options.compare_name, 
                      options.compare_name)
-            if options.this_name is not None:
+            if options.store_name is not None:
                 name_dir_path = "%s/%s" % \
                     (os.path.realpath(options.output_dir), 
-                    options.this_name)
+                    options.store_name)
                 if not os.path.isdir(name_dir_path):
                     os.mkdir(name_dir_path)
-                options.this_name= "%s/%s" % \
-                        (name_dir_path, options.this_name)
+                options.store_name= "%s/%s" % \
+                        (name_dir_path, options.store_name)
         else:
             storage_class = AnswerTestCloudStorage
 
         # Initialize answer/reference storage
         AnswerTestingTest.reference_storage = self.storage = \
-                storage_class(options.compare_name, options.this_name)
+                storage_class(options.compare_name, options.store_name)
 
-        self.store_results = options.store_results
         self.store_local_results = options.store_local_results
         global run_big_data
         run_big_data = options.big_data



https://bitbucket.org/yt_analysis/yt-3.0/changeset/928bf03996bb/
changeset:   928bf03996bb
branch:      yt
user:        jzuhone
date:        2012-11-17 13:18:43
summary:     Fixing a bug I just introduced.
affected #:  1 file

diff -r d491ba54ddbdf07b04e5733db2e1e6396162a11e -r 928bf03996bb8a0704b89e5bede9ace9a57aafa2 setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,3 +1,9 @@
 [egg_info]
 #tag_build = .dev
 #tag_svn_revision = 1
+
+[nosetests]
+detailed-errors=1
+where=yt
+exclude=answer_testing
+with-xunit=1
\ No newline at end of file



https://bitbucket.org/yt_analysis/yt-3.0/changeset/e0004f1bce41/
changeset:   e0004f1bce41
branch:      yt
user:        MatthewTurk
date:        2012-11-17 16:44:45
summary:     Changing os.mkdir to os.makedirs for selecting tests with no output dir
affected #:  1 file

diff -r 928bf03996bb8a0704b89e5bede9ace9a57aafa2 -r e0004f1bce411f5b19faa125fcaa23ab68d5fd6c yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -112,7 +112,7 @@
                     (os.path.realpath(options.output_dir), 
                     options.store_name)
                 if not os.path.isdir(name_dir_path):
-                    os.mkdir(name_dir_path)
+                    os.makedirs(name_dir_path)
                 options.store_name= "%s/%s" % \
                         (name_dir_path, options.store_name)
         else:
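
Note: os.mkdir fails if any intermediate directory is missing, while os.makedirs creates the whole chain; that matters when the output directory itself does not exist yet. A quick sketch in a temporary directory:

    import os
    import tempfile

    base = tempfile.mkdtemp()
    nested = os.path.join(base, "answers", "gold001")
    # os.mkdir(nested)    # OSError: the "answers" parent does not exist
    os.makedirs(nested)   # creates both levels
    print(os.path.isdir(nested))   # True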



https://bitbucket.org/yt_analysis/yt-3.0/changeset/73d07ed5ec64/
changeset:   73d07ed5ec64
branch:      yt-3.0
user:        MatthewTurk
date:        2012-11-19 14:23:56
summary:     Merging from tip of development branch
affected #:  32 files

diff -r 2c1073f374f88915370e9354472e3c4d75253bd2 -r 73d07ed5ec643a97f2b8c2b40c6b9514bc8b7304 .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5152,3 +5152,6 @@
 0000000000000000000000000000000000000000 svn.993
 fff7118f00e25731ccf37cba3082b8fcb73cf90e svn.371
 0000000000000000000000000000000000000000 svn.371
+f15825659f5af3ce64aaad30062aff3603cbfb66 hop callback
+0000000000000000000000000000000000000000 hop callback
+0000000000000000000000000000000000000000 hop callback


diff -r 2c1073f374f88915370e9354472e3c4d75253bd2 -r 73d07ed5ec643a97f2b8c2b40c6b9514bc8b7304 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -245,7 +245,7 @@
         echo "******************************************"
         echo "**                                      **"
         echo "**    Your CFLAGS is not empty.         **"
-        echo "**    This can beak h5py compilation.   **"
+        echo "**    This can break h5py compilation.  **"
         echo "**                                      **"
         echo "******************************************"
         echo "******************************************"
@@ -411,28 +411,31 @@
 
 # Now we dump all our SHA512 files out.
 
-echo '2c1933ab31246b4f4eba049d3288156e0a72f1730604e3ed7357849967cdd329e4647cf236c9442ecfb06d0aff03e6fc892a7ba2a5c1cf5c011b7ab9c619acec  Cython-0.16.tar.gz' > Cython-0.16.tar.gz.sha512
+echo 'eda1b8090e5e21e7e039ef4dd03de186a7b416df9d5a4e4422abeeb4d51383b9a6858e1ac4902d8e5010f661b295bbb2452c43c8738be668379b4eb4835d0f61  Cython-0.17.1.tar.gz' > Cython-0.17.1.tar.gz.sha512
 echo '44eea803870a66ff0bab08d13a8b3388b5578ebc1c807d1d9dca0a93e6371e91b15d02917a00b3b20dc67abb5a21dabaf9b6e9257a561f85eeff2147ac73b478  PyX-0.11.1.tar.gz' > PyX-0.11.1.tar.gz.sha512
 echo 'b981f8464575bb24c297631c87a3b9172312804a0fc14ce1fa7cb41ce2b0d2fd383cd1c816d6e10c36467d18bf9492d6faf557c81c04ff3b22debfa93f30ad0b  Python-2.7.3.tgz' > Python-2.7.3.tgz.sha512
 echo 'c017d3d59dd324ac91af0edc178c76b60a5f90fbb775cf843e39062f95bd846238f2c53705f8890ed3f34bc0e6e75671a73d13875eb0287d6201cb45f0a2d338  bzip2-1.0.5.tar.gz' > bzip2-1.0.5.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
 echo 'b519218f93946400326e9b656669269ecb3e5232b944e18fbc3eadc4fe2b56244d68aae56d6f69042b4c87c58c881ee2aaa279561ea0f0f48d5842155f4de9de  freetype-2.4.4.tar.gz' > freetype-2.4.4.tar.gz.sha512
-echo '1531789e0a77d4829796d18552a4de7aecae7e8b63763a7951a8091921995800740fe03e72a7dbd496a5590828131c5f046ddead695e5cba79343b8c205148d1  h5py-2.0.1.tar.gz' > h5py-2.0.1.tar.gz.sha512
-echo '9644896e4a84665ad22f87eb885cbd4a0c60a5c30085d5dd5dba5f3c148dbee626f0cb01e59a7995a84245448a3f1e9ba98687d3f10250e2ee763074ed8ddc0e  hdf5-1.8.7.tar.gz' > hdf5-1.8.7.tar.gz.sha512
+echo 'b3290c498191684781ca5286ab454eb1bd045e8d894f5b86fb86beb88f174e22ac3ab008fb02d6562051d9fa6a9593920cab433223f6d5473999913223b8e183  h5py-2.1.0.tar.gz' > h5py-2.1.0.tar.gz.sha512
+echo 'c68a425bacaa7441037910b9166f25b89e1387776a7749a5350793f89b1690350df5f018060c31d03686e7c3ed2aa848bd2b945c96350dc3b6322e087934783a  hdf5-1.8.9.tar.gz' > hdf5-1.8.9.tar.gz.sha512
 echo 'dbefad00fa34f4f21dca0f1e92e95bd55f1f4478fa0095dcf015b4d06f0c823ff11755cd777e507efaf1c9098b74af18f613ec9000e5c3a5cc1c7554fb5aefb8  libpng-1.5.12.tar.gz' > libpng-1.5.12.tar.gz.sha512
-echo 'f5ab95c29ef6958096970265a6079f0eb8c43a500924346c4a6c6eb89d9110eeeb6c34a53715e71240e82ded2b76a7b8d5a9b05a07baa000b2926718264ad8ff  matplotlib-1.1.0.tar.gz' > matplotlib-1.1.0.tar.gz.sha512
-echo 'ec7416729f99f5eef6700507e740552e771d6dd8863f757311538d7d67a0eecd3426381bd42a7ddbf0771bdde8bba5cb943f60031ae3567d6a3dcac738facda8  mercurial-2.2.2.tar.gz' > mercurial-2.2.2.tar.gz.sha512
+echo '5b1a0fb52dcb21ca5f0ab71c8a49550e1e8cf633552ec6598dc43f0b32c03422bf5af65b30118c163231ecdddfd40846909336f16da318959106076e80a3fad0  matplotlib-1.2.0.tar.gz' > matplotlib-1.2.0.tar.gz.sha512
+echo '52d1127de2208aaae693d16fef10ffc9b8663081bece83b7597d65706e9568af3b9e56bd211878774e1ebed92e21365ee9c49602a0ff5e48f89f12244d79c161  mercurial-2.4.tar.gz' > mercurial-2.4.tar.gz.sha512
 echo 'de3dd37f753614055dcfed910e9886e03688b8078492df3da94b1ec37be796030be93291cba09e8212fffd3e0a63b086902c3c25a996cf1439e15c5b16e014d9  numpy-1.6.1.tar.gz' > numpy-1.6.1.tar.gz.sha512
 echo '5ad681f99e75849a5ca6f439c7a19bb51abc73d121b50f4f8e4c0da42891950f30407f761a53f0fe51b370b1dbd4c4f5a480557cb2444c8c7c7d5412b328a474  sqlite-autoconf-3070500.tar.gz' > sqlite-autoconf-3070500.tar.gz.sha512
 echo 'edae735960279d92acf58e1f4095c6392a7c2059b8f1d2c46648fc608a0fb06b392db2d073f4973f5762c034ea66596e769b95b3d26ad963a086b9b2d09825f2  zlib-1.2.3.tar.bz2' > zlib-1.2.3.tar.bz2.sha512
-echo '42021737c93cea513116e6051cff9b803e3f25d6019c74370b42f4c91d1af73e94ac2b7ace85b7565fa3d45b79231079bd48a242531beeafa33c36d7139ce838  ipython-0.13.tar.gz' > ipython-0.13.tar.gz.sha512
+echo '05ac335727a2c3036f31a2506fdd2615aa436bfbe2f81799fe6c51bffe2591ad6a8427f3b25c34e7e709fb4e7607a0589dc7a22185c1f9b894e90de6711a88aa  ipython-0.13.1.tar.gz' > ipython-0.13.1.tar.gz.sha512
 echo 'fb3cf421b2dc48c31956b3e3ee4ab6ebc743deec3bf626c2238a1996c8c51be87260bd6aa662793a1f0c34dcda9b3146763777bb162dfad6fec4ca7acc403b2e  zeromq-2.2.0.tar.gz' > zeromq-2.2.0.tar.gz.sha512
 echo 'd761b492352841cdc125d9f0c99ee6d6c435812472ea234728b7f0fb4ad1048e1eec9b399df2081fbc926566f333f7780fedd0ce23255a6633fe5c60ed15a6af  pyzmq-2.1.11.tar.gz' > pyzmq-2.1.11.tar.gz.sha512
 echo '57fa5e57dfb98154a42d2d477f29401c2260ae7ad3a8128a4098b42ee3b35c54367b1a3254bc76b9b3b14b4aab7c3e1135858f68abc5636daedf2f01f9b8a3cf  tornado-2.2.tar.gz' > tornado-2.2.tar.gz.sha512
 echo '1332e3d5465ca249c357314cf15d2a4e5e83a941841021b8f6a17a107dce268a7a082838ade5e8db944ecde6bfb111211ab218aa414ee90aafbb81f1491b3b93  Forthon-0.8.10.tar.gz' > Forthon-0.8.10.tar.gz.sha512
+echo 'c13116c1f0547000cc565e15774687b9e884f8b74fb62a84e578408a868a84961704839065ae4f21b662e87f2aaedf6ea424ea58dfa9d3d73c06281f806d15dd  nose-1.2.1.tar.gz' > nose-1.2.1.tar.gz.sha512
+echo '73de2c99406a38f85273931597525cec4ebef55b93712adca3b0bfea8ca3fc99446e5d6495817e9ad55cf4d48feb7fb49734675c4cc8938db8d4a5225d30eca7  python-hglib-0.2.tar.gz' > python-hglib-0.2.tar.gz.sha512
+echo 'ffc602eb346717286b3d0a6770c60b03b578b3cf70ebd12f9e8b1c8c39cdb12ef219ddaa041d7929351a6b02dbb8caf1821b5452d95aae95034cbf4bc9904a7a  sympy-0.7.2.tar.gz' > sympy-0.7.2.tar.gz.sha512
 
 # Individual processes
-[ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.7.tar.gz
+[ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.9.tar.gz
 [ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.3.tar.bz2 
 [ $INST_BZLIB -eq 1 ] && get_ytproject bzip2-1.0.5.tar.gz
 [ $INST_PNG -eq 1 ] && get_ytproject libpng-1.5.12.tar.gz
@@ -444,14 +447,16 @@
 [ $INST_0MQ -eq 1 ] && get_ytproject tornado-2.2.tar.gz
 get_ytproject Python-2.7.3.tgz
 get_ytproject numpy-1.6.1.tar.gz
-get_ytproject matplotlib-1.1.0.tar.gz
-get_ytproject mercurial-2.2.2.tar.gz
-get_ytproject ipython-0.13.tar.gz
-get_ytproject h5py-2.0.1.tar.gz
-get_ytproject Cython-0.16.tar.gz
+get_ytproject matplotlib-1.2.0.tar.gz
+get_ytproject mercurial-2.4.tar.gz
+get_ytproject ipython-0.13.1.tar.gz
+get_ytproject h5py-2.1.0.tar.gz
+get_ytproject Cython-0.17.1.tar.gz
 get_ytproject reason-js-20120623.zip
 get_ytproject Forthon-0.8.10.tar.gz
-
+get_ytproject nose-1.2.1.tar.gz 
+get_ytproject python-hglib-0.2.tar.gz
+get_ytproject sympy-0.7.2.tar.gz
 if [ $INST_BZLIB -eq 1 ]
 then
     if [ ! -e bzip2-1.0.5/done ]
@@ -535,11 +540,11 @@
 
 if [ -z "$HDF5_DIR" ]
 then
-    if [ ! -e hdf5-1.8.7/done ]
+    if [ ! -e hdf5-1.8.9/done ]
     then
-        [ ! -e hdf5-1.8.7 ] && tar xfz hdf5-1.8.7.tar.gz
+        [ ! -e hdf5-1.8.9 ] && tar xfz hdf5-1.8.9.tar.gz
         echo "Installing HDF5"
-        cd hdf5-1.8.7
+        cd hdf5-1.8.9
         ( ./configure --prefix=${DEST_DIR}/ --enable-shared 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -587,7 +592,7 @@
 if [ $INST_HG -eq 1 ]
 then
     echo "Installing Mercurial."
-    do_setup_py mercurial-2.2.2
+    do_setup_py mercurial-2.4
     export HG_EXEC=${DEST_DIR}/bin/hg
 else
     # We assume that hg can be found in the path.
@@ -655,10 +660,10 @@
     echo "Setting CFLAGS ${CFLAGS}"
 fi
 # Now we set up the basedir for matplotlib:
-mkdir -p ${DEST_DIR}/src/matplotlib-1.1.0
-echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.1.0/setup.cfg
-echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.1.0/setup.cfg
-do_setup_py matplotlib-1.1.0
+mkdir -p ${DEST_DIR}/src/matplotlib-1.2.0
+echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.2.0/setup.cfg
+echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.2.0/setup.cfg
+do_setup_py matplotlib-1.2.0
 if [ -n "${OLD_LDFLAGS}" ]
 then
     export LDFLAG=${OLD_LDFLAGS}
@@ -685,10 +690,13 @@
     do_setup_py tornado-2.2
 fi
 
-do_setup_py ipython-0.13
-do_setup_py h5py-2.0.1
-do_setup_py Cython-0.16
+do_setup_py ipython-0.13.1
+do_setup_py h5py-2.1.0
+do_setup_py Cython-0.17.1
 do_setup_py Forthon-0.8.10
+do_setup_py nose-1.2.1
+do_setup_py python-hglib-0.2
+do_setup_py sympy-0.7.2
 [ $INST_PYX -eq 1 ] && do_setup_py PyX-0.11.1
 
 echo "Doing yt update, wiping local changes and updating to branch ${BRANCH}"




diff -r 2c1073f374f88915370e9354472e3c4d75253bd2 -r 73d07ed5ec643a97f2b8c2b40c6b9514bc8b7304 yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -81,6 +81,7 @@
     ValidateSpatial, \
     ValidateGridType, \
     add_field, \
+    add_grad, \
     derived_field
 
 from particle_trajectories import \


diff -r 2c1073f374f88915370e9354472e3c4d75253bd2 -r 73d07ed5ec643a97f2b8c2b40c6b9514bc8b7304 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -58,6 +58,66 @@
                 return function
             return create_function
         self[name] = DerivedField(name, function, **kwargs)
+        
+    def add_grad(self, field, **kwargs):
+        """
+        Creates the partial derivative of a given field. This function will
+        autogenerate the names of the gradient fields.
+
+        """
+        sl = slice(2,None,None)
+        sr = slice(None,-2,None)
+        
+        def _gradx(f, data):
+            grad = data[field][sl,1:-1,1:-1] - data[field][sr,1:-1,1:-1]
+            grad /= 2.0*data["dx"].flat[0]
+            g = np.zeros(data[field].shape, dtype='float64')
+            g[1:-1,1:-1,1:-1] = grad
+            return g
+            
+        def _grady(f, data):
+            grad = data[field][1:-1,sl,1:-1] - data[field][1:-1,sr,1:-1]
+            grad /= 2.0*data["dy"].flat[0]
+            g = np.zeros(data[field].shape, dtype='float64')
+            g[1:-1,1:-1,1:-1] = grad
+            return g
+            
+        def _gradz(f, data):
+            grad = data[field][1:-1,1:-1,sl] - data[field][1:-1,1:-1,sr]
+            grad /= 2.0*data["dz"].flat[0]
+            g = np.zeros(data[field].shape, dtype='float64')
+            g[1:-1,1:-1,1:-1] = grad
+            return g
+        
+        d_kwargs = kwargs.copy()
+        if "display_name" in kwargs: del d_kwargs["display_name"]
+        
+        for ax in "xyz":
+            if "display_name" in kwargs:
+                disp_name = r"%s\_%s" % (kwargs["display_name"], ax)
+            else:
+                disp_name = r"\partial %s/\partial %s" % (field, ax)
+            name = "Grad_%s_%s" % (field, ax)
+            self[name] = DerivedField(name, function=eval('_grad%s' % ax),
+                         take_log=False, validators=[ValidateSpatial(1,[field])],
+                         display_name = disp_name, **d_kwargs)
+        
+        def _grad(f, data) :
+            a = np.power(data["Grad_%s_x" % field],2)
+            b = np.power(data["Grad_%s_y" % field],2)
+            c = np.power(data["Grad_%s_z" % field],2)
+            norm = np.sqrt(a+b+c)
+            return norm
+
+        if "display_name" in kwargs:
+            disp_name = kwargs["display_name"]
+        else:
+            disp_name = r"\Vert\nabla %s\Vert" % (field)   
+        name = "Grad_%s" % field           
+        self[name] = DerivedField(name, function=_grad, take_log=False,
+                                  display_name = disp_name, **d_kwargs)
+        mylog.info("Added new fields: Grad_%s_x, Grad_%s_y, Grad_%s_z, Grad_%s" \
+                   % (field, field, field, field))
 
     def has_key(self, key):
         # This gets used a lot
@@ -107,6 +167,7 @@
 FieldInfo = FieldInfoContainer()
 FieldInfo.name = id(FieldInfo)
 add_field = FieldInfo.add_field
+add_grad = FieldInfo.add_grad
 
 def derived_field(**kwargs):
     def inner_decorator(function):
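
Note: the new add_grad registers second-order central differences on the interior zones plus a magnitude field. A standalone numpy sketch of the x-component stencil used by _gradx, with a toy field and spacing:

    import numpy as np

    f = np.arange(4**3, dtype='float64').reshape((4, 4, 4))
    dx = 0.25
    sl, sr = slice(2, None), slice(None, -2)
    g = np.zeros_like(f)
    g[1:-1, 1:-1, 1:-1] = (f[sl, 1:-1, 1:-1] - f[sr, 1:-1, 1:-1]) / (2.0*dx)
    # interior zones hold (f[i+1] - f[i-1]) / (2 dx); the one-zone rind stays 0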


diff -r 2c1073f374f88915370e9354472e3c4d75253bd2 -r 73d07ed5ec643a97f2b8c2b40c6b9514bc8b7304 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -326,7 +326,14 @@
         refine_by = None
         if refine_by is None: refine_by = 2
         self.refine_by = refine_by
-        self.dimensionality = 3
+        dimensionality = 3
+        if grid['dimensions'][2] == 1 :
+            dimensionality = 2
+        if grid['dimensions'][1] == 1 :
+            dimensionality = 1
+        if dimensionality <= 2 : self.domain_dimensions[2] = 1.
+        if dimensionality == 1 : self.domain_dimensions[1] = 1.
+        self.dimensionality = dimensionality
         self.current_time = grid["time"]
         self.unique_identifier = self._handle.__hash__()
         self.cosmological_simulation = False
@@ -334,7 +341,8 @@
         self.field_ordering = 'fortran'
         self.boundary_conditions = [1]*6
 
-        self.nvtk = int(np.product(self.domain_dimensions/(grid['dimensions']-1)))
+        ND = self.dimensionality
+        self.nvtk = int(np.product(self.domain_dimensions[:ND]/(grid['dimensions'][:ND]-1)))
 
         self.current_redshift = self.omega_lambda = self.omega_matter = \
             self.hubble_constant = self.cosmological_simulation = 0.0
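
Note: restricting the nvtk product to the active dimensions avoids a divide-by-zero, since a 2D dump has grid['dimensions'][2] == 1 and the full-array expression would divide by (1 - 1). A toy 2D example:

    import numpy as np

    domain_dimensions = np.array([128, 128, 1])
    grid_dimensions = np.array([65, 65, 1])
    ND = 2
    nvtk = int(np.prod(domain_dimensions[:ND]/(grid_dimensions[:ND]-1)))
    # nvtk == 4: a 2x2 tiling of 64^2-zone vtk files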


diff -r 2c1073f374f88915370e9354472e3c4d75253bd2 -r 73d07ed5ec643a97f2b8c2b40c6b9514bc8b7304 yt/frontends/athena/fields.py
--- a/yt/frontends/athena/fields.py
+++ b/yt/frontends/athena/fields.py
@@ -26,6 +26,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+import numpy as np
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, \
     FieldInfo, \
@@ -36,6 +37,8 @@
     ValidateGridType, \
     NullFunc, \
     TranslationFunc
+from yt.utilities.physical_constants import \
+    kboltz,mh
 import yt.data_objects.universal_fields
 
 log_translation_dict = {}
@@ -44,10 +47,7 @@
                     "Pressure": "pressure",
                     "x-velocity": "velocity_x",
                     "y-velocity": "velocity_y",
-                    "z-velocity": "velocity_z",
-                    "mag_field_x": "cell_centered_B_x ",
-                    "mag_field_y": "cell_centered_B_y ",
-                    "mag_field_z": "cell_centered_B_z "}
+                    "z-velocity": "velocity_z"}
 
 AthenaFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
 add_field = AthenaFieldInfo.add_field
@@ -86,3 +86,30 @@
 for f,v in translation_dict.items():
     add_field(f, TranslationFunc(v), take_log=False)
 
+def _Temperature(fields, data):
+    if data.has_field_parameter("mu") :
+        mu = data.get_field_parameter("mu")
+    else:
+        mu = 0.6
+    return mu*mh*data["Pressure"]/data["Density"]/kboltz
+add_field("Temperature", function=_Temperature, take_log=False,
+          units=r"\rm{K}")
+
+def _Bx(fields, data):
+    factor = np.sqrt(4.*np.pi)
+    return data['cell_centered_B_x']*factor
+add_field("Bx", function=_Bx, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_x")
+
+def _By(fields, data):
+    factor = np.sqrt(4.*np.pi)
+    return data['cell_centered_B_y']*factor
+add_field("By", function=_By, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_y")
+
+def _Bz(fields, data):
+    factor = np.sqrt(4.*np.pi)
+    return data['cell_centered_B_z']*factor
+add_field("Bz", function=_Bz, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_z")
+
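
Note: the new Athena fields assume the magnetic field is stored in code units where a factor of sqrt(4*pi) converts to Gauss, and derive Temperature from the ideal-gas relation with a default mean molecular weight of 0.6. Standalone versions with approximate cgs constants:

    import numpy as np

    kboltz = 1.3806488e-16   # Boltzmann constant, erg/K
    mh = 1.6737e-24          # hydrogen mass, g (approximate)

    def temperature(pressure, density, mu=0.6):
        return mu*mh*pressure/density/kboltz

    def to_gauss(b_code):
        return b_code*np.sqrt(4.*np.pi)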


diff -r 2c1073f374f88915370e9354472e3c4d75253bd2 -r 73d07ed5ec643a97f2b8c2b40c6b9514bc8b7304 yt/frontends/enzo/answer_testing_support.py
--- /dev/null
+++ b/yt/frontends/enzo/answer_testing_support.py
@@ -0,0 +1,111 @@
+"""
+Answer Testing support for Enzo.
+
+Author: Britton Smith <brittonsmith at gmail.com>
+Affiliation: Michigan State University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Britton Smith.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.testing import *
+from yt.config import ytcfg
+from yt.mods import *
+
+from yt.utilities.answer_testing.framework import \
+     AnswerTestingTest, \
+     can_run_pf, \
+     FieldValuesTest, \
+     GridHierarchyTest, \
+     GridValuesTest, \
+     ProjectionValuesTest, \
+     ParentageRelationshipsTest, \
+     temp_cwd
+
+def requires_outputlog(path = ".", prefix = ""):
+    def ffalse(func):
+        return lambda: None
+    def ftrue(func):
+        @wraps(func)
+        def fyielder(*args, **kwargs):
+            with temp_cwd(path):
+                for t in func(*args, **kwargs):
+                    if isinstance(t, AnswerTestingTest):
+                        t.prefix = prefix
+                    yield t
+        return fyielder
+    if os.path.exists("OutputLog"):
+        return ftrue
+    with temp_cwd(path):
+        if os.path.exists("OutputLog"):
+            return ftrue
+    return ffalse
+     
+def standard_small_simulation(pf_fn, fields):
+    if not can_run_pf(pf_fn): return
+    dso = [None]
+    for field in fields:
+        yield GridValuesTest(pf_fn, field)
+        if 'particle' in field: continue
+        for ds in dso:
+            for axis in [0, 1, 2]:
+                for weight_field in [None, "Density"]:
+                    yield ProjectionValuesTest(
+                        pf_fn, axis, field, weight_field,
+                        ds, decimals=3)
+            yield FieldValuesTest(
+                    pf_fn, field, ds, decimals=3)
+                    
+class ShockTubeTest(object):
+    def __init__(self, data_file, solution_file, fields, 
+                 left_edges, right_edges, rtol, atol):
+        self.solution_file = solution_file
+        self.data_file = data_file
+        self.fields = fields
+        self.left_edges = left_edges
+        self.right_edges = right_edges
+        self.rtol = rtol
+        self.atol = atol
+
+    def __call__(self):
+        # Read in the pf
+        pf = load(self.data_file)  
+        exact = self.get_analytical_solution() 
+
+        ad = pf.h.all_data()
+        position = ad['x']
+        for k in self.fields:
+            field = ad[k]
+            for xmin, xmax in zip(self.left_edges, self.right_edges):
+                mask = (position >= xmin)*(position <= xmax)
+                exact_field = np.interp(position[mask], exact['pos'], exact[k]) 
+                # yield test vs analytical solution 
+                yield assert_allclose, field[mask], exact_field, \
+                    self.rtol, self.atol
+
+    def get_analytical_solution(self):
+        # Reads in from file 
+        pos, dens, vel, pres, inte = \
+                np.loadtxt(self.solution_file, unpack=True)
+        exact = {}
+        exact['pos'] = pos
+        exact['Density'] = dens
+        exact['x-velocity'] = vel
+        exact['Pressure'] = pres
+        exact['ThermalEnergy'] = inte
+        return exact
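
Note: ShockTubeTest masks cells into windows and compares each field against the tabulated exact solution via np.interp. A toy version of that comparison, with a stand-in analytic profile:

    import numpy as np
    from numpy.testing import assert_allclose

    position = np.linspace(0.0, 1.0, 64)
    field = np.exp(-position)              # stand-in for ad['Density']
    exact_pos = np.linspace(0.0, 1.0, 16)
    exact_dens = np.exp(-exact_pos)        # the tabulated solution

    mask = (position >= 0.25)*(position <= 0.75)
    exact_field = np.interp(position[mask], exact_pos, exact_dens)
    assert_allclose(field[mask], exact_field, rtol=1e-2, atol=1e-8)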


diff -r 2c1073f374f88915370e9354472e3c4d75253bd2 -r 73d07ed5ec643a97f2b8c2b40c6b9514bc8b7304 yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -241,9 +241,13 @@
             if my_initial_time == my_times[my_indices[0] - 1]: my_indices[0] -= 1
             my_outputs = my_all_outputs[my_indices[0]:my_indices[1]]
 
-        TimeSeriesData.__init__(self, outputs=[output['filename'] for output in my_outputs],
-                                parallel=parallel)
-        mylog.info("%d outputs loaded into time series." % len(my_outputs))
+        init_outputs = []
+        for output in my_outputs:
+            if os.path.exists(output['filename']):
+                init_outputs.append(output['filename'])
+            
+        TimeSeriesData.__init__(self, outputs=init_outputs, parallel=parallel)
+        mylog.info("%d outputs loaded into time series." % len(init_outputs))
 
     def _parse_parameter_file(self):
         """
@@ -478,7 +482,7 @@
         self.parameters['TopGridRank'] = 3
         self.parameters['DomainLeftEdge'] = np.zeros(self.parameters['TopGridRank'])
         self.parameters['DomainRightEdge'] = np.ones(self.parameters['TopGridRank'])
-        self.parameters['Refineby'] = 2 # technically not the enzo default
+        self.parameters['RefineBy'] = 2 # technically not the enzo default
         self.parameters['StopCycle'] = 100000
         self.parameters['dtDataDump'] = 0.
         self.parameters['CycleSkipDataDump'] = 0.
@@ -585,6 +589,8 @@
         if outputs is None:
             outputs = self.all_outputs
         my_outputs = []
+        if not outputs:
+            return my_outputs
         for value in values:
             outputs.sort(key=lambda obj:np.fabs(value - obj[key]))
             if (tolerance is None or np.abs(value - outputs[0][key]) <= tolerance) \
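
Note: the simulation handling now skips outputs whose files are missing before building the time series, and bails out early when the output list is empty. A sketch of the filter (paths assumed):

    import os

    my_outputs = [{'filename': 'DD0001/DD0001'},
                  {'filename': 'DD0002/DD0002'}]
    init_outputs = [output['filename'] for output in my_outputs
                    if os.path.exists(output['filename'])]
    # only datasets actually present on disk enter TimeSeriesData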


diff -r 2c1073f374f88915370e9354472e3c4d75253bd2 -r 73d07ed5ec643a97f2b8c2b40c6b9514bc8b7304 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -76,7 +76,7 @@
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.directory = os.path.dirname(self.hierarchy_filename)
         self._handle = pf._handle
-
+        self._particle_handle = pf._particle_handle
         self.float_type = np.float64
         GridGeometryHandler.__init__(self,pf,data_style)
 
@@ -86,9 +86,9 @@
     def _detect_fields(self):
         ncomp = self._handle["/unknown names"].shape[0]
         self.field_list = [s for s in self._handle["/unknown names"][:].flat]
-        if ("/particle names" in self._handle) :
+        if ("/particle names" in self._particle_handle) :
             self.field_list += ["particle_" + s[0].strip() for s
-                                in self._handle["/particle names"][:]]
+                                in self._particle_handle["/particle names"][:]]
     
     def _setup_classes(self):
         dd = self._get_data_reader_dict()
@@ -105,6 +105,7 @@
     def _parse_hierarchy(self):
         f = self._handle # shortcut
         pf = self.parameter_file # shortcut
+        f_part = self._particle_handle # shortcut
         
         # Initialize to the domain left / domain right
         ND = self.parameter_file.dimensionality
@@ -127,12 +128,15 @@
                               for ax in 'xyz']
         self.grid_dimensions[:] *= (nxb, nyb, nzb)
         try:
-            self.grid_particle_count[:] = f["/localnp"][:][:,None]
+            self.grid_particle_count[:] = f_part["/localnp"][:][:,None]
         except KeyError:
             self.grid_particle_count[:] = 0.0
         self._particle_indices = np.zeros(self.num_grids + 1, dtype='int64')
-        np.add.accumulate(self.grid_particle_count.squeeze(),
-                          out=self._particle_indices[1:])
+        if self.num_grids > 1 :
+            np.add.accumulate(self.grid_particle_count.squeeze(),
+                              out=self._particle_indices[1:])
+        else :
+            self._particle_indices[1] = self.grid_particle_count.squeeze()
         # This will become redundant, as _prepare_grid will reset it to its
         # current value.  Note that FLASH uses 1-based indexing for refinement
         # levels, but we do not, so we reduce the level by 1.
@@ -223,6 +227,7 @@
     
     def __init__(self, filename, data_style='flash_hdf5',
                  storage_filename = None,
+                 particle_filename = None, 
                  conversion_override = None):
 
         if self._handle is not None: return
@@ -230,6 +235,16 @@
         if conversion_override is None: conversion_override = {}
         self._conversion_override = conversion_override
 
+        self.particle_filename = particle_filename
+
+        if self.particle_filename is None :
+            self._particle_handle = self._handle
+        else :
+            try :
+                self._particle_handle = h5py.File(self.particle_filename, "r")
+            except :
+                raise IOError(self.particle_filename)
+                                                                
         StaticOutput.__init__(self, filename, data_style)
         self.storage_filename = storage_filename
 
@@ -288,6 +303,7 @@
             self.conversion_factors["Time"] = 1.0
         for unit in mpc_conversion.keys():
             self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
+            self.units[unit+"cm"] = self.units[unit]
             self.units[unit] /= (1.0+self.current_redshift)
             
     def _setup_cgs_units(self):
@@ -410,16 +426,20 @@
             if dimensionality < 3:
                 mylog.warning("Guessing dimensionality as %s", dimensionality)
 
+        if 'lrefine_min' in self.parameters.keys() : # PARAMESH
+            nblockx = self.parameters["nblockx"]
+            nblocky = self.parameters["nblocky"]
+            nblockz = self.parameters["nblockz"]
+        else : # Uniform Grid
+            nblockx = self.parameters["iprocs"]
+            nblocky = self.parameters["jprocs"]
+            nblockz = self.parameters["kprocs"]
+
+        # In case the user wasn't careful
+        if dimensionality <= 2 : nblockz = 1
+        if dimensionality == 1 : nblocky = 1
+
         self.dimensionality = dimensionality
-
-        self.geometry = self.parameters["geometry"]
-        if self.geometry == "cylindrical" and self.dimensionality == 2:
-            self.domain_left_edge[2] = 0.0
-            self.domain_right_edge[2] = 2.0 * np.pi
-
-        nblockx = self.parameters["nblockx"]
-        nblocky = self.parameters["nblocky"]
-        nblockz = self.parameters["nblockz"]
         self.domain_dimensions = \
             np.array([nblockx*nxb,nblocky*nyb,nblockz*nzb])
         try:

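Note: the new particle_filename keyword lets a FLASH plot file pull its particle
data from a companion part file; when it is omitted, the plot file itself is used
as before.  A sketch (file names hypothetical):

    from yt.mods import load

    pf = load("sedov_hdf5_plt_cnt_0003",
              particle_filename="sedov_hdf5_part_0003")
    print pf.h.grid_particle_count.sum()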

diff -r 2c1073f374f88915370e9354472e3c4d75253bd2 -r 73d07ed5ec643a97f2b8c2b40c6b9514bc8b7304 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -37,7 +37,7 @@
     ValidateGridType
 import yt.data_objects.universal_fields
 from yt.utilities.physical_constants import \
-    kboltz
+    kboltz, mh
 KnownFLASHFields = FieldInfoContainer()
 add_flash_field = KnownFLASHFields.add_field
 
@@ -321,12 +321,12 @@
 def GetMagRescalingFactor(pf):
     if pf['unitsystem'].lower() == "cgs":
          factor = 1
-    if pf['unitsystem'].lower() == "si":
+    elif pf['unitsystem'].lower() == "si":
          factor = np.sqrt(4*np.pi/1e7)
-    if pf['unitsystem'].lower() == "none":
+    elif pf['unitsystem'].lower() == "none":
          factor = np.sqrt(4*np.pi)
     else:
-        raise RuntimeError("Runtime parameter unitsystem with"
+        raise RuntimeError("Runtime parameter unitsystem with "
                            "value %s is unrecognized" % pf['unitsystem'])
     return factor
 


diff -r 2c1073f374f88915370e9354472e3c4d75253bd2 -r 73d07ed5ec643a97f2b8c2b40c6b9514bc8b7304 yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -40,9 +40,11 @@
         # Now we cache the particle fields
         self.pf = pf
         self._handle = pf._handle
+        self._particle_handle = pf._particle_handle
+        
         try :
             particle_fields = [s[0].strip() for s in
-                               self._handle["/particle names"][:]]
+                               self._particle_handle["/particle names"][:]]
             self._particle_fields = dict([("particle_" + s, i) for i, s in
                                           enumerate(particle_fields)])
         except KeyError:
@@ -54,12 +56,13 @@
 
     def _read_data_set(self, grid, field):
         f = self._handle
+        f_part = self._particle_handle
         if field in self._particle_fields:
             if grid.NumberOfParticles == 0: return np.array([], dtype='float64')
             start = self.pf.h._particle_indices[grid.id - grid._id_offset]
             end = self.pf.h._particle_indices[grid.id - grid._id_offset + 1]
             fi = self._particle_fields[field]
-            tr = f["/tracer particles"][start:end, fi]
+            tr = f_part["/tracer particles"][start:end, fi]
         else:
             tr = f["/%s" % field][grid.id - grid._id_offset,:,:,:].transpose()
         return tr.astype("float64")


diff -r 2c1073f374f88915370e9354472e3c4d75253bd2 -r 73d07ed5ec643a97f2b8c2b40c6b9514bc8b7304 yt/frontends/stream/api.py
--- a/yt/frontends/stream/api.py
+++ b/yt/frontends/stream/api.py
@@ -30,7 +30,8 @@
       StreamStaticOutput, \
       StreamHandler, \
       load_uniform_grid, \
-      load_amr_grids
+      load_amr_grids, \
+      refine_amr
 
 from .fields import \
       KnownStreamFields, \


diff -r 2c1073f374f88915370e9354472e3c4d75253bd2 -r 73d07ed5ec643a97f2b8c2b40c6b9514bc8b7304 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -44,6 +44,8 @@
     decompose_array, get_psize
 from yt.utilities.definitions import \
     mpc_conversion, sec_conversion
+from yt.utilities.flagging_methods import \
+    FlaggingGrid
 
 from .fields import \
     StreamFieldInfo, \
@@ -457,3 +459,68 @@
     for unit in mpc_conversion.keys():
         spf.units[unit] = mpc_conversion[unit] * box_in_mpc
     return spf
+
+def refine_amr(base_pf, refinement_criteria, fluid_operators, max_level,
+               callback = None):
+    r"""Given a base parameter file, repeatedly apply refinement criteria and
+    fluid operators until a maximum level is reached.
+
+    Parameters
+    ----------
+    base_pf : StaticOutput
+        This is any static output.  It can also be a stream static output, for
+        instance as returned by load_uniform_grid.
+    refinement_criteria : list of :class:`~yt.utilities.flagging_methods.FlaggingMethod`
+        These criteria will be applied in sequence to identify cells that need
+        to be refined.
+    fluid_operators : list of :class:`~yt.utilities.initial_conditions.FluidOperator`
+        These fluid operators will be applied in sequence to all resulting
+        grids.
+    max_level : int
+        The maximum level to which the data will be refined
+    callback : function, optional
+        A function that will be called at the beginning of each refinement
+        cycle, with the current parameter file.
+
+    Examples
+    --------
+    >>> domain_dims = (32, 32, 32)
+    >>> data = np.zeros(domain_dims) + 0.25
+    >>> fo = [ic.CoredSphere(0.05, 0.3, [0.7,0.4,0.75], {"Density": (0.25, 100.0)})]
+    >>> rc = [fm.flagging_method_registry["overdensity"](8.0)]
+    >>> ug = load_uniform_grid({'Density': data}, domain_dims, 1.0)
+    >>> pf = refine_amr(ug, rc, fo, 5)
+    """
+    last_gc = base_pf.h.num_grids
+    cur_gc = -1
+    pf = base_pf    
+    while pf.h.max_level < max_level and last_gc != cur_gc:
+        mylog.info("Refining another level.  Current max level: %s",
+                  pf.h.max_level)
+        last_gc = pf.h.grids.size
+        for m in fluid_operators: m.apply(pf)
+        if callback is not None: callback(pf)
+        grid_data = []
+        for g in pf.h.grids:
+            gd = dict( left_edge = g.LeftEdge,
+                       right_edge = g.RightEdge,
+                       level = g.Level,
+                       dimensions = g.ActiveDimensions )
+            for field in pf.h.field_list:
+                gd[field] = g[field]
+            grid_data.append(gd)
+            if g.Level < pf.h.max_level: continue
+            fg = FlaggingGrid(g, refinement_criteria)
+            nsg = fg.find_subgrids()
+            for sg in nsg:
+                LE = sg.left_index * g.dds
+                dims = sg.dimensions * pf.refine_by
+                grid = pf.h.smoothed_covering_grid(g.Level + 1, LE, dims)
+                gd = dict(left_edge = LE, right_edge = grid.right_edge,
+                          level = g.Level + 1, dimensions = dims)
+                for field in pf.h.field_list:
+                    gd[field] = grid[field]
+                grid_data.append(gd)
+        pf = load_amr_grids(grid_data, pf.domain_dimensions, 1.0)
+        cur_gc = pf.h.num_grids
+    return pf

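Note: the docstring example above assumes the module aliases ic and fm; a
self-contained version spelling out the imports it relies on:

    import numpy as np
    from yt.frontends.stream.api import load_uniform_grid, refine_amr
    import yt.utilities.initial_conditions as ic
    import yt.utilities.flagging_methods as fm

    domain_dims = (32, 32, 32)
    data = np.zeros(domain_dims) + 0.25
    fo = [ic.CoredSphere(0.05, 0.3, [0.7, 0.4, 0.75],
                         {"Density": (0.25, 100.0)})]
    rc = [fm.flagging_method_registry["overdensity"](8.0)]
    ug = load_uniform_grid({"Density": data}, domain_dims, 1.0)
    pf = refine_amr(ug, rc, fo, 5)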

diff -r 2c1073f374f88915370e9354472e3c4d75253bd2 -r 73d07ed5ec643a97f2b8c2b40c6b9514bc8b7304 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -568,6 +568,9 @@
 def fix_axis(axis):
     return inv_axis_names.get(axis, axis)
 
+def get_image_suffix(name):
+    suffix = os.path.splitext(name)[1]
+    return suffix if suffix in ['.png', '.eps', '.ps', '.pdf'] else ''
 
 # This is a modification of:
 # http://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks-in-python

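Note: os.path.splitext returns the extension with its leading dot, hence the dotted
entries in the recognized list above.  Behavior sketch:

    from yt.funcs import get_image_suffix

    print get_image_suffix("projection.pdf")   # '.pdf'
    print get_image_suffix("projection.jpeg")  # ''  (callers fall back to '.png')
    print get_image_suffix("projection")       # ''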

diff -r 2c1073f374f88915370e9354472e3c4d75253bd2 -r 73d07ed5ec643a97f2b8c2b40c6b9514bc8b7304 yt/geometry/grid_geometry_handler.py
--- a/yt/geometry/grid_geometry_handler.py
+++ b/yt/geometry/grid_geometry_handler.py
@@ -132,17 +132,19 @@
         """
         Prints out (stdout) relevant information about the simulation
         """
-        header = "%3s\t%6s\t%14s" % ("level","# grids", "# cells")
+        header = "%3s\t%6s\t%14s\t%14s" % ("level","# grids", "# cells",
+                                           "# cells^3")
         print header
         print "%s" % (len(header.expandtabs())*"-")
         for level in xrange(MAXLEVEL):
             if (self.level_stats['numgrids'][level]) == 0:
                 break
-            print "% 3i\t% 6i\t% 14i" % \
+            print "% 3i\t% 6i\t% 14i\t% 14i" % \
                   (level, self.level_stats['numgrids'][level],
-                   self.level_stats['numcells'][level])
+                   self.level_stats['numcells'][level],
+                   int(round(self.level_stats['numcells'][level]**(1./3))))
             dx = self.select_grids(level)[0].dds[0]
-        print "-" * 28
+        print "-" * 46
         print "   \t% 6i\t% 14i" % (self.level_stats['numgrids'].sum(), self.level_stats['numcells'].sum())
         print "\n"
         try:

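Note: the new fourth column prints the cube root of the cell count, i.e. the
equivalent number of cells per side at each level, a quick gauge of effective
resolution.  Roughly, for a hypothetical single-grid dataset:

    from yt.mods import load

    pf = load("my_dataset")  # hypothetical
    pf.h.print_stats()
    # level  # grids        # cells      # cells^3
    # ----------------------------------------------
    #   0         1           32768             32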

diff -r 2c1073f374f88915370e9354472e3c4d75253bd2 -r 73d07ed5ec643a97f2b8c2b40c6b9514bc8b7304 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -58,7 +58,7 @@
 from yt.data_objects.api import \
     BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \
     data_object_registry, \
-    derived_field, add_field, FieldInfo, \
+    derived_field, add_field, add_grad, FieldInfo, \
     ValidateParameter, ValidateDataField, ValidateProperty, \
     ValidateSpatial, ValidateGridType, \
     TimeSeriesData, AnalysisTask, analysis_task, \


diff -r 2c1073f374f88915370e9354472e3c4d75253bd2 -r 73d07ed5ec643a97f2b8c2b40c6b9514bc8b7304 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -26,9 +26,10 @@
 from yt.funcs import *
 from numpy.testing import assert_array_equal, assert_almost_equal, \
     assert_approx_equal, assert_array_almost_equal, assert_equal, \
-    assert_array_less, assert_string_equal, assert_array_almost_equal_nulp
+    assert_array_less, assert_string_equal, assert_array_almost_equal_nulp,\
+    assert_allclose
 
-def assert_rel_equal(a1, a2, decimals):
+def assert_rel_equal(a1, a2, decimals, err_msg='', verbose=True):
     # We have nan checks in here because occasionally we have fields that get
     # weighted without non-zero weights.  I'm looking at you, particle fields!
     if isinstance(a1, np.ndarray):
@@ -38,7 +39,8 @@
         a2[np.isnan(a2)] = 1.0
     elif np.isnan(a1) and np.isnan(a2):
         return True
-    return assert_almost_equal(a1/a2, 1.0, decimals)
+    return assert_almost_equal(a1/a2, 1.0, decimals, err_msg=err_msg,
+                               verbose=verbose)
 
 def amrspace(extent, levels=7, cells=8):
     """Creates two numpy arrays representing the left and right bounds of 

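Note: assert_rel_equal now forwards err_msg and verbose to NumPy's
assert_almost_equal, so relative comparisons carry the same diagnostics as the
other asserts.  A small sketch (the NaN entries are replaced in place by 1.0
before the ratio test, so mutually-NaN values pass):

    import numpy as np
    from yt.testing import assert_rel_equal

    a1 = np.array([1.0, 2.0, np.nan])
    a2 = np.array([1.0000001, 2.0000002, np.nan])
    assert_rel_equal(a1, a2, 5, err_msg="profiles disagree")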

diff -r 2c1073f374f88915370e9354472e3c4d75253bd2 -r 73d07ed5ec643a97f2b8c2b40c6b9514bc8b7304 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -29,12 +29,15 @@
 import contextlib
 import urllib2
 import cPickle
+import sys
 
 from nose.plugins import Plugin
 from yt.testing import *
 from yt.config import ytcfg
 from yt.mods import *
+from yt.data_objects.static_output import StaticOutput
 import cPickle
+import shelve
 
 from yt.utilities.logger import disable_stream_logging
 from yt.utilities.command_line import get_yt_version
@@ -47,68 +50,99 @@
 
 class AnswerTesting(Plugin):
     name = "answer-testing"
+    _my_version = None
 
     def options(self, parser, env=os.environ):
         super(AnswerTesting, self).options(parser, env=env)
-        parser.add_option("--answer-compare", dest="compare_name",
-            default=_latest, help="The name against which we will compare")
+        parser.add_option("--answer-compare-name", dest="compare_name", metavar='str',
+            default=_latest, help="The name of tests against which we will compare")
         parser.add_option("--answer-big-data", dest="big_data",
             default=False, help="Should we run against big data, too?",
             action="store_true")
-        parser.add_option("--answer-name", dest="this_name",
+        parser.add_option("--answer-store-name", dest="store_name", metavar='str',
             default=None,
             help="The name we'll call this set of tests")
-        parser.add_option("--answer-store", dest="store_results",
-            default=False, action="store_true")
+        parser.add_option("--local-store", dest="store_local_results",
+            default=False, action="store_true", help="Store/Load local results?")
+
+    @property
+    def my_version(self):
+        if self._my_version is not None:
+            return self._my_version
+        try:
+            version = get_yt_version()
+        except:
+            version = "UNKNOWN%s" % (time.time())
+        self._my_version = version
+        return self._my_version
 
     def configure(self, options, conf):
         super(AnswerTesting, self).configure(options, conf)
         if not self.enabled:
             return
         disable_stream_logging()
-        try:
-            my_hash = get_yt_version()
-        except:
-            my_hash = "UNKNOWN%s" % (time.time())
-        if options.this_name is None: options.this_name = my_hash
+        if options.store_name is not None:
+            # Setting store_name means the user is storing results, not
+            # comparing them, even though compare_name has a default value.
+            self.store_results = True
+            options.compare_name = None
+        else:
+            self.store_results = False
+            options.store_name = self.my_version
         from yt.config import ytcfg
         ytcfg["yt","__withintesting"] = "True"
         AnswerTestingTest.result_storage = \
             self.result_storage = defaultdict(dict)
-        if options.compare_name is not None:
-            # Now we grab from our S3 store
-            if options.compare_name == "latest":
-                options.compare_name = _latest
-            AnswerTestingTest.reference_storage = \
-                AnswerTestOpener(options.compare_name)
-        self.answer_name = options.this_name
-        self.store_results = options.store_results
+        if options.compare_name == "SKIP":
+            options.compare_name = None
+        elif options.compare_name == "latest":
+            options.compare_name = _latest
+            
+        # Local/Cloud storage 
+        if options.store_local_results:
+            storage_class = AnswerTestLocalStorage
+            # Fix up filename for local storage 
+            if options.compare_name is not None:
+                options.compare_name = "%s/%s/%s" % \
+                    (os.path.realpath(options.output_dir), options.compare_name, 
+                     options.compare_name)
+            if options.store_name is not None:
+                name_dir_path = "%s/%s" % \
+                    (os.path.realpath(options.output_dir), 
+                    options.store_name)
+                if not os.path.isdir(name_dir_path):
+                    os.makedirs(name_dir_path)
+                options.store_name= "%s/%s" % \
+                        (name_dir_path, options.store_name)
+        else:
+            storage_class = AnswerTestCloudStorage
+
+        # Initialize answer/reference storage
+        AnswerTestingTest.reference_storage = self.storage = \
+                storage_class(options.compare_name, options.store_name)
+
+        self.store_local_results = options.store_local_results
         global run_big_data
         run_big_data = options.big_data
 
-    def finalize(self, result):
-        # This is where we dump our result storage up to Amazon, if we are able
-        # to.
+    def finalize(self, result=None):
         if self.store_results is False: return
-        import boto
-        from boto.s3.key import Key
-        c = boto.connect_s3()
-        bucket = c.get_bucket("yt-answer-tests")
-        for pf_name in self.result_storage:
-            rs = cPickle.dumps(self.result_storage[pf_name])
-            tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name)) 
-            if tk is not None: tk.delete()
-            k = Key(bucket)
-            k.key = "%s_%s" % (self.answer_name, pf_name)
-            k.set_contents_from_string(rs)
-            k.set_acl("public-read")
+        self.storage.dump(self.result_storage)        
 
-class AnswerTestOpener(object):
-    def __init__(self, reference_name):
+class AnswerTestStorage(object):
+    def __init__(self, reference_name=None, answer_name=None):
         self.reference_name = reference_name
+        self.answer_name = answer_name
         self.cache = {}
+    def dump(self, result_storage):
+        raise NotImplementedError 
+    def get(self, pf_name, default=None):
+        raise NotImplementedError 
 
+class AnswerTestCloudStorage(AnswerTestStorage):
     def get(self, pf_name, default = None):
+        if self.reference_name is None: return default
         if pf_name in self.cache: return self.cache[pf_name]
         url = _url_path % (self.reference_name, pf_name)
         try:
@@ -123,6 +157,47 @@
         self.cache[pf_name] = rv
         return rv
 
+    def dump(self, result_storage):
+        if self.answer_name is None: return
+        # This is where we dump our result storage up to Amazon, if we are able
+        # to.
+        import boto
+        from boto.s3.key import Key
+        c = boto.connect_s3()
+        bucket = c.get_bucket("yt-answer-tests")
+        for pf_name in result_storage:
+            rs = cPickle.dumps(result_storage[pf_name])
+            tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name)) 
+            if tk is not None: tk.delete()
+            k = Key(bucket)
+            k.key = "%s_%s" % (self.answer_name, pf_name)
+            k.set_contents_from_string(rs)
+            k.set_acl("public-read")
+
+class AnswerTestLocalStorage(AnswerTestStorage):
+    def dump(self, result_storage):
+        if self.answer_name is None: return
+        # Store data using shelve
+        ds = shelve.open(self.answer_name, protocol=-1)
+        for pf_name in result_storage:
+            answer_name = "%s" % pf_name
+            if answer_name in ds:
+                mylog.info("Overwriting %s", answer_name)
+            ds[answer_name] = result_storage[pf_name]
+        ds.close()
+
+    def get(self, pf_name, default=None):
+        if self.reference_name is None: return default
+        # Read data using shelve
+        answer_name = "%s" % pf_name
+        ds = shelve.open(self.reference_name, protocol=-1)
+        try:
+            result = ds[answer_name]
+        except KeyError:
+            result = default
+        ds.close()
+        return result
+
 @contextlib.contextmanager
 def temp_cwd(cwd):
     oldcwd = os.getcwd()
@@ -132,6 +207,10 @@
 
 def can_run_pf(pf_fn):
     path = ytcfg.get("yt", "test_data_dir")
+    if not os.path.isdir(path):
+        return False
+    if isinstance(pf_fn, StaticOutput):
+        return AnswerTestingTest.result_storage is not None
     with temp_cwd(path):
         try:
             load(pf_fn)
@@ -141,27 +220,47 @@
 
 def data_dir_load(pf_fn):
     path = ytcfg.get("yt", "test_data_dir")
+    if isinstance(pf_fn, StaticOutput): return pf_fn
+    if not os.path.isdir(path):
+        return False
     with temp_cwd(path):
         pf = load(pf_fn)
         pf.h
         return pf
 
+def sim_dir_load(sim_fn, path = None, sim_type = "Enzo",
+                 find_outputs=False):
+    if path is None and not os.path.exists(sim_fn):
+        raise IOError
+    if os.path.exists(sim_fn) or not path:
+        path = "."
+    with temp_cwd(path):
+        return simulation(sim_fn, sim_type,
+                          find_outputs=find_outputs)
+
 class AnswerTestingTest(object):
     reference_storage = None
     result_storage = None
+    prefix = ""
     def __init__(self, pf_fn):
         self.pf = data_dir_load(pf_fn)
 
     def __call__(self):
         nv = self.run()
-        if self.reference_storage is not None:
-            dd = self.reference_storage.get(str(self.pf))
-            if dd is None: raise YTNoOldAnswer()
+        if self.reference_storage.reference_name is not None:
+            dd = self.reference_storage.get(self.storage_name)
+            if dd is None: raise YTNoOldAnswer(self.storage_name)
             ov = dd[self.description]
             self.compare(nv, ov)
         else:
             ov = None
-        self.result_storage[str(self.pf)][self.description] = nv
+        self.result_storage[self.storage_name][self.description] = nv
+
+    @property
+    def storage_name(self):
+        if self.prefix != "":
+            return "%s_%s" % (self.prefix, self.pf)
+        return str(self.pf)
 
     def compare(self, new_result, old_result):
         raise RuntimeError
@@ -212,10 +311,12 @@
     _type_name = "FieldValues"
     _attrs = ("field", )
 
-    def __init__(self, pf_fn, field, obj_type = None):
+    def __init__(self, pf_fn, field, obj_type = None,
+                 decimals = None):
         super(FieldValuesTest, self).__init__(pf_fn)
         self.obj_type = obj_type
         self.field = field
+        self.decimals = decimals
 
     def run(self):
         obj = self.create_obj(self.pf, self.obj_type)
@@ -225,19 +326,50 @@
         return np.array([avg, mi, ma])
 
     def compare(self, new_result, old_result):
-        assert_equal(new_result, old_result)
+        err_msg = "Field values for %s not equal." % self.field
+        if self.decimals is None:
+            assert_equal(new_result, old_result, 
+                         err_msg=err_msg, verbose=True)
+        else:
+            assert_allclose(new_result, old_result, 10.**(-self.decimals),
+                             err_msg=err_msg, verbose=True)
 
+class AllFieldValuesTest(AnswerTestingTest):
+    _type_name = "AllFieldValues"
+    _attrs = ("field", )
+
+    def __init__(self, pf_fn, field, obj_type = None,
+                 decimals = None):
+        super(AllFieldValuesTest, self).__init__(pf_fn)
+        self.obj_type = obj_type
+        self.field = field
+        self.decimals = decimals
+
+    def run(self):
+        obj = self.create_obj(self.pf, self.obj_type)
+        return obj[self.field]
+
+    def compare(self, new_result, old_result):
+        err_msg = "All field values for %s not equal." % self.field
+        if self.decimals is None:
+            assert_equal(new_result, old_result, 
+                         err_msg=err_msg, verbose=True)
+        else:
+            assert_rel_equal(new_result, old_result, self.decimals,
+                             err_msg=err_msg, verbose=True)
+            
 class ProjectionValuesTest(AnswerTestingTest):
     _type_name = "ProjectionValues"
     _attrs = ("field", "axis", "weight_field")
 
     def __init__(self, pf_fn, axis, field, weight_field = None,
-                 obj_type = None):
+                 obj_type = None, decimals = None):
         super(ProjectionValuesTest, self).__init__(pf_fn)
         self.axis = axis
         self.field = field
         self.weight_field = weight_field
         self.obj_type = obj_type
+        self.decimals = decimals
 
     def run(self):
         if self.obj_type is not None:
@@ -254,7 +386,14 @@
         for k in new_result:
             assert (k in old_result)
         for k in new_result:
-            assert_equal(new_result[k], old_result[k])
+            err_msg = "%s values of %s (%s weighted) projection (axis %s) not equal." % \
+              (k, self.field, self.weight_field, self.axis)
+            if self.decimals is None:
+                assert_equal(new_result[k], old_result[k],
+                             err_msg=err_msg)
+            else:
+                assert_allclose(new_result[k], old_result[k], 
+                                 10.**-(self.decimals), err_msg=err_msg)
 
 class PixelizedProjectionValuesTest(AnswerTestingTest):
     _type_name = "PixelizedProjectionValues"
@@ -313,6 +452,26 @@
         for k in new_result:
             assert_equal(new_result[k], old_result[k])
 
+class VerifySimulationSameTest(AnswerTestingTest):
+    _type_name = "VerifySimulationSame"
+    _attrs = ()
+
+    def __init__(self, simulation_obj):
+        self.pf = simulation_obj
+
+    def run(self):
+        result = [ds.current_time for ds in self.pf]
+        return result
+
+    def compare(self, new_result, old_result):
+        assert_equal(len(new_result), len(old_result),
+                     err_msg="Number of outputs not equal.",
+                     verbose=True)
+        for i in range(len(new_result)):
+            assert_equal(new_result[i], old_result[i],
+                         err_msg="Output times not equal.",
+                         verbose=True)
+        
 class GridHierarchyTest(AnswerTestingTest):
     _type_name = "GridHierarchy"
     _attrs = ()

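Note: the new decimals keyword compares stored answers to within a relative
tolerance instead of exact equality, which makes answers portable across compilers
and platforms.  A sketch of a nose test module using it (dataset path hypothetical):

    from yt.utilities.answer_testing.framework import \
        FieldValuesTest, ProjectionValuesTest

    def test_my_dataset():
        pf_fn = "MyData/data0001"  # hypothetical
        for field in ("Density", "Temperature"):
            yield FieldValuesTest(pf_fn, field, decimals=10)
            for axis in (0, 1, 2):
                yield ProjectionValuesTest(pf_fn, axis, field, decimals=10)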

diff -r 2c1073f374f88915370e9354472e3c4d75253bd2 -r 73d07ed5ec643a97f2b8c2b40c6b9514bc8b7304 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -25,6 +25,7 @@
 
 # We don't need to import 'exceptions'
 #import exceptions
+import os.path
 
 class YTException(Exception):
     def __init__(self, pf = None):
@@ -198,3 +199,12 @@
 
     def __str__(self):
         return "Must have A>=B>=C"
+
+class EnzoTestOutputFileNonExistent(YTException):
+    def __init__(self, filename):
+        self.filename = filename
+        self.testname = os.path.basename(os.path.dirname(filename))
+
+    def __str__(self):
+        return "Enzo test output file (OutputLog) not generated for: " + \
+            "'%s'" % (self.testname) + ".\nTest did not complete."

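Note: the test name is derived from the directory holding the missing OutputLog,
e.g.:

    from yt.utilities.exceptions import EnzoTestOutputFileNonExistent

    err = EnzoTestOutputFileNonExistent("/tests/GravityTest/OutputLog")
    print str(err)
    # Enzo test output file (OutputLog) not generated for: 'GravityTest'.
    # Test did not complete.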

diff -r 2c1073f374f88915370e9354472e3c4d75253bd2 -r 73d07ed5ec643a97f2b8c2b40c6b9514bc8b7304 yt/utilities/flagging_methods.py
--- a/yt/utilities/flagging_methods.py
+++ b/yt/utilities/flagging_methods.py
@@ -24,15 +24,10 @@
 """
 
 import numpy as np # For modern purposes
+from yt.utilities.lib import grow_flagging_field
 
 flagging_method_registry = {}
 
-def flag_cells(grid, methods):
-    flagged = np.zeros(grid.ActiveDimensions, dtype="bool")
-    for method in methods:
-        flagged |= method(grid)
-    return flagged
-
 class FlaggingMethod(object):
     _skip_add = False
     class __metaclass__(type):
@@ -46,6 +41,144 @@
     def __init__(self, over_density):
         self.over_density = over_density
 
-    def __call__(self, pf, grid):
-        rho = grid["Density"] / (pf.refine_by**grid.Level)
+    def __call__(self, grid):
+        rho = grid["Density"] / (grid.pf.refine_by**grid.Level)
         return (rho > self.over_density)
+
+class FlaggingGrid(object):
+    def __init__(self, grid, methods):
+        self.grid = grid
+        flagged = np.zeros(grid.ActiveDimensions, dtype="bool")
+        for method in methods:
+            flagged |= method(self.grid)
+        self.flagged = grow_flagging_field(flagged)
+        self.subgrids = []
+        self.left_index = grid.get_global_startindex()
+        self.dimensions = grid.ActiveDimensions.copy()
+
+    def find_subgrids(self):
+        if not np.any(self.flagged): return []
+        psg = ProtoSubgrid(self.flagged, self.left_index, self.dimensions)
+        sgl = [psg]
+        index = 0
+        while index < len(sgl):
+            psg = sgl[index]
+            psg.shrink()
+            if psg.dimensions.prod() == 0:
+                sgl.pop(index)
+                continue
+            while not psg.acceptable:
+                new_psgs = []
+                for i, dim in enumerate(np.argsort(psg.dimensions)[::-1]):
+                    new_psgs = psg.find_by_zero_signature(dim)
+                    if len(new_psgs) > 1:
+                        break
+                if len(new_psgs) <= 1:
+                    new_psgs = psg.find_by_second_derivative()
+                psg = new_psgs[0]
+                sgl[index] = psg 
+                sgl.extend(new_psgs[1:])
+                psg.shrink()
+            index += 1
+        return sgl
+
+
+# Much or most of this is directly translated from Enzo
+class ProtoSubgrid(object):
+
+    def __init__(self, flagged_base, left_index, dimensions, offset = (0,0,0)):
+        self.left_index = left_index.copy()
+        self.dimensions = dimensions.copy()
+        self.flagged = flagged_base[offset[0]:offset[0]+dimensions[0],
+                                    offset[1]:offset[1]+dimensions[1],
+                                    offset[2]:offset[2]+dimensions[2]]
+        self.compute_signatures()
+
+    def compute_signatures(self):
+        self.sigs = []
+        for dim in range(3):
+            d1 = (dim + 1) % 3
+            d2 = (dim == 0)
+            self.sigs.append(self.flagged.sum(axis=d1).sum(axis=d2))
+
+    @property
+    def acceptable(self):
+        return float(self.flagged.sum()) / self.flagged.size > 0.2
+
+    def shrink(self):
+        new_ind = []
+        for dim in range(3):
+            sig = self.sigs[dim]
+            new_start = 0
+            while sig[new_start] == 0:
+                new_start += 1
+            new_end = sig.size 
+            while sig[new_end - 1] == 0:
+                new_end -= 1
+            self.dimensions[dim] = new_end - new_start
+            self.left_index[dim] += new_start
+            new_ind.append((new_start, new_end))
+        self.flagged = self.flagged[new_ind[0][0]:new_ind[0][1],
+                                    new_ind[1][0]:new_ind[1][1],
+                                    new_ind[2][0]:new_ind[2][1]]
+        self.compute_signatures()
+
+    def find_by_zero_signature(self, dim):
+        sig = self.sigs[dim]
+        grid_ends = np.zeros((sig.size, 2))
+        ng = 0
+        i = 0
+        while i < sig.size:
+            if sig[i] != 0:
+                grid_ends[ng, 0] = i
+                while i < sig.size and sig[i] != 0:
+                    i += 1
+                grid_ends[ng, 1] = i - 1
+                ng += 1
+            i += 1
+        new_grids = []
+        for si, ei in grid_ends[:ng,:]:
+            li = self.left_index.copy()
+            dims = self.dimensions.copy()
+            li[dim] += si
+            dims[dim] = ei - si
+            offset = [0,0,0]
+            offset[dim] = si
+            new_grids.append(ProtoSubgrid(self.flagged, li, dims, offset))
+        return new_grids
+
+    def find_by_second_derivative(self):
+        max_strength = 0
+        max_axis = -1
+        max_ind = -1
+        for dim in range(3):
+            sig = self.sigs[dim]
+            sd = sig[:-2] - 2.0*sig[1:-1] + sig[2:]
+            grid_ends = np.zeros((sig.size, 2))
+            ng = 0
+            center = int((self.flagged.shape[dim] - 1) / 2)
+            strength = zero_strength = 0
+            for i in range(1, sig.size-2):
+                # Note that sd is offset by one
+                if sd[i-1] * sd[i] < 0:
+                    strength = np.abs(sd[i-1] - sd[i])
+                    if strength > zero_strength or \
+                       (strength == zero_strength and np.abs(center - i) < np.abs(zero_cross -i )):
+                        zero_strength = strength
+                        zero_cross = i
+            if zero_strength > max_strength:
+                max_strength = zero_strength
+                max_axis = dim
+                max_ind = zero_cross
+        dims = self.dimensions.copy()
+        li = self.left_index.copy()
+        dims[max_axis] = max_ind
+        psg1 = ProtoSubgrid(self.flagged, li, dims)
+        li[max_axis] += max_ind
+        dims[max_axis] = self.dimensions[max_axis] - max_ind
+        offset = np.zeros(3, dtype="int64")
+        offset[max_axis] = max_ind
+        psg2 = ProtoSubgrid(self.flagged, li, dims, offset)
+        return [psg1, psg2]
+
+    def __str__(self):
+        return "LI: (%s) DIMS: (%s)" % (self.left_index, self.dimensions)

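Note: a sketch of driving the new machinery by hand, assuming pf is an
already-loaded parameter file with a "Density" field; refine_amr above does
essentially this for every grid at the finest level:

    from yt.utilities.flagging_methods import \
        flagging_method_registry, FlaggingGrid

    od = flagging_method_registry["overdensity"](8.0)
    fg = FlaggingGrid(pf.h.grids[0], [od])
    for sg in fg.find_subgrids():
        print sg  # LI: (...) DIMS: (...)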

diff -r 2c1073f374f88915370e9354472e3c4d75253bd2 -r 73d07ed5ec643a97f2b8c2b40c6b9514bc8b7304 yt/utilities/initial_conditions.py
--- /dev/null
+++ b/yt/utilities/initial_conditions.py
@@ -0,0 +1,80 @@
+"""
+Painting zones in a grid
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+
+class FluidOperator(object):
+    def apply(self, pf):
+        for g in pf.h.grids: self(g)
+
+class TopHatSphere(FluidOperator):
+    def __init__(self, radius, center, fields):
+        self.radius = radius
+        self.center = center
+        self.fields = fields
+        
+    def __call__(self, grid, sub_select = None):
+        r = np.zeros(grid.ActiveDimensions, dtype="float64")
+        for i, ax in enumerate("xyz"):
+            np.add(r, (grid[ax] - self.center[i])**2.0, r)
+        np.sqrt(r, r)
+        ind = (r <= self.radius)
+        if sub_select is not None:
+            ind &= sub_select
+        for field, val in self.fields.iteritems():
+            grid[field][ind] = val
+
+class CoredSphere(FluidOperator):
+    def __init__(self, core_radius, radius, center, fields):
+        self.radius = radius
+        self.center = center
+        self.fields = fields
+        self.core_radius = core_radius
+
+    def __call__(self, grid, sub_select = None):
+        r = np.zeros(grid.ActiveDimensions, dtype="float64")
+        r2 = self.radius**2
+        cr2 = self.core_radius**2
+        for i, ax in enumerate("xyz"):
+            np.add(r, (grid[ax] - self.center[i])**2.0, r)
+        np.maximum(r, cr2, r)
+        ind = (r <= r2)
+        if sub_select is not None:
+            ind &= sub_select
+        for field, (outer_val, inner_val) in self.fields.iteritems():
+            val = ((r[ind] - cr2) / (r2 - cr2))**0.5 * (outer_val - inner_val)
+            grid[field][ind] = val + inner_val
+
+class RandomFluctuation(FluidOperator):
+    def __init__(self, fields):
+        self.fields = fields
+
+    def __call__(self, grid, sub_select = None):
+        if sub_select is None:
+            sub_select = Ellipsis
+        for field, mag in self.fields.iteritems():
+            vals = grid[field][sub_select]
+            rc = 1.0 + (np.random.random(vals.shape) - 0.5) * mag
+            grid[field][sub_select] *= rc

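Note: a sketch combining the new operators, assuming pf is a stream parameter file
with a "Density" field: paint a uniform sphere, then multiply in 5 percent random
fluctuations.

    from yt.utilities.initial_conditions import \
        TopHatSphere, RandomFluctuation

    TopHatSphere(0.1, [0.5, 0.5, 0.5], {"Density": 10.0}).apply(pf)
    RandomFluctuation({"Density": 0.05}).apply(pf)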

diff -r 2c1073f374f88915370e9354472e3c4d75253bd2 -r 73d07ed5ec643a97f2b8c2b40c6b9514bc8b7304 yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -971,9 +971,9 @@
             tmax[i] = 1e60
     # We have to jumpstart our calculation
     for i in range(3):
-        if cur_ind[i] == vc.dims[i] and step[i] == 1:
+        if cur_ind[i] == vc.dims[i] and step[i] >= 0:
             return 0
-        if cur_ind[i] == -1 and step[i] == -1:
+        if cur_ind[i] == -1 and step[i] <= -1:
             return 0
     enter_t = intersect_t
     hit = 0


diff -r 2c1073f374f88915370e9354472e3c4d75253bd2 -r 73d07ed5ec643a97f2b8c2b40c6b9514bc8b7304 yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -428,3 +428,25 @@
                     rvg[1,i,j,k] = vyg[i,j,k] - bv[1]
                     rvg[2,i,j,k] = vzg[i,j,k] - bv[2]
         return rvg
+
+def grow_flagging_field(oofield):
+    cdef np.ndarray[np.uint8_t, ndim=3] ofield = oofield.astype("uint8")
+    cdef np.ndarray[np.uint8_t, ndim=3] nfield
+    nfield = np.zeros_like(ofield)
+    cdef int i, j, k, ni, nj, nk
+    cdef int oi, oj, ok
+    for ni in range(ofield.shape[0]):
+        for nj in range(ofield.shape[1]):
+            for nk in range(ofield.shape[2]):
+                for oi in range(3):
+                    i = ni + (oi - 1)
+                    if i < 0 or i >= ofield.shape[0]: continue
+                    for oj in range(3):
+                        j = nj + (oj - 1)
+                        if j < 0 or j >= ofield.shape[1]: continue
+                        for ok in range(3):
+                            k = nk + (ok - 1)
+                            if k < 0 or k >= ofield.shape[2]: continue
+                            if ofield[i, j, k] == 1:
+                                nfield[ni, nj, nk] = 1
+    return nfield.astype("bool")

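Note: for reference, a pure-NumPy (much slower) statement of what the Cython
routine computes: a one-cell binary dilation over the full 3x3x3 neighborhood,
clipped at the domain edges rather than wrapping.

    import numpy as np

    def grow_flagging_field_py(ofield):
        # Illustration only; mirrors the Cython loops above.
        o = np.asarray(ofield, dtype="bool")
        p = np.zeros(np.array(o.shape) + 2, dtype="bool")
        p[1:-1, 1:-1, 1:-1] = o  # zero-padded copy, so edges clip
        n = np.zeros_like(o)
        for oi in range(3):
            for oj in range(3):
                for ok in range(3):
                    n |= p[oi:oi + o.shape[0],
                           oj:oj + o.shape[1],
                           ok:ok + o.shape[2]]
        return n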

diff -r 2c1073f374f88915370e9354472e3c4d75253bd2 -r 73d07ed5ec643a97f2b8c2b40c6b9514bc8b7304 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -279,12 +279,12 @@
     ranks = None
     available_ranks = None
     tasks = None
-    workgroups = []
     def __init__(self):
         self.comm = communication_system.communicators[-1]
         self.size = self.comm.size
         self.ranks = range(self.size)
         self.available_ranks = range(self.size)
+        self.workgroups = []
     
     def add_workgroup(self, size=None, ranks=None, name=None):
         if size is None:
@@ -296,7 +296,7 @@
             ranks = [self.available_ranks.pop(0) for i in range(size)]
         # Default name to the workgroup number.
         if name is None: 
-            name = string(len(workgroups))
+            name = str(len(self.workgroups))
         group = self.comm.comm.Get_group().Incl(ranks)
         new_comm = self.comm.comm.Create(group)
         if self.comm.rank in ranks:
@@ -304,16 +304,20 @@
         self.workgroups.append(Workgroup(len(ranks), ranks, new_comm, name))
     
     def free_workgroup(self, workgroup):
+        # If you want to actually delete the workgroup you will need to
+        # pop it out of the self.workgroups list so you don't have references
+        # that are left dangling, e.g. see free_all() below.
         for i in workgroup.ranks:
             if self.comm.rank == i:
                 communication_system.communicators.pop()
             self.available_ranks.append(i) 
-        del workgroup
         self.available_ranks.sort()
 
     def free_all(self):
         for wg in self.workgroups:
             self.free_workgroup(wg)
+        for i in range(len(self.workgroups)):
+            self.workgroups.pop(0)
 
     @classmethod
     def from_sizes(cls, sizes):

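Note: making workgroups per-instance stops state leaking between pools through the
old class attribute, and free_all() now empties the pool's own list.  A sketch
(must run under MPI with at least four tasks; from_sizes is shown above):

    from yt.utilities.parallel_tools.parallel_analysis_interface import \
        ProcessorPool

    pool = ProcessorPool.from_sizes([1, 2, 1])
    for wg in pool.workgroups:
        print wg.name, wg.ranks
    pool.free_all()  # pool.workgroups is empty afterwards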

diff -r 2c1073f374f88915370e9354472e3c4d75253bd2 -r 73d07ed5ec643a97f2b8c2b40c6b9514bc8b7304 yt/utilities/physical_constants.py
--- a/yt/utilities/physical_constants.py
+++ b/yt/utilities/physical_constants.py
@@ -20,6 +20,7 @@
 boltzmann_constant_cgs = 1.3806504e-16  # erg K^-1
 gravitational_constant_cgs  = 6.67428e-8  # cm^3 g^-1 s^-2
 planck_constant_cgs   = 6.62606896e-27  # erg s
+stefan_boltzmann_constant_cgs = 5.670373e-5 # erg cm^-2 s^-1 K^-4
 rho_crit_now = 1.8788e-29  # g times h^2 (critical mass for closure, Cosmology)
 
 # Misc. Approximations

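Note: as a sanity check, the new constant follows from the other cgs constants via
sigma = 2 pi^5 k^4 / (15 h^3 c^2):

    import numpy as np

    kboltz = 1.3806504e-16    # erg K^-1, as above
    hplanck = 6.62606896e-27  # erg s, as above
    clight = 2.99792458e10    # cm s^-1
    sigma = 2 * np.pi**5 * kboltz**4 / (15.0 * hplanck**3 * clight**2)
    print sigma               # ~5.6704e-05, matching the value added above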

diff -r 2c1073f374f88915370e9354472e3c4d75253bd2 -r 73d07ed5ec643a97f2b8c2b40c6b9514bc8b7304 yt/utilities/tests/test_flagging_methods.py
--- a/yt/utilities/tests/test_flagging_methods.py
+++ b/yt/utilities/tests/test_flagging_methods.py
@@ -9,4 +9,4 @@
 def test_over_density():
     od_flag = flagging_method_registry["overdensity"](0.75) 
     criterion = (pf.h.grids[0]["Density"] > 0.75)
-    assert( np.all( od_flag(pf, pf.h.grids[0]) == criterion) )
+    assert( np.all( od_flag(pf.h.grids[0]) == criterion) )


diff -r 2c1073f374f88915370e9354472e3c4d75253bd2 -r 73d07ed5ec643a97f2b8c2b40c6b9514bc8b7304 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -434,8 +434,9 @@
                                self.data_source[item],
                                self.buff_size[0], self.buff_size[1],
                                self.bounds).transpose()
-        self[item] = buff
-        return buff
+        ia = ImageArray(buff, info=self._get_info(item))
+        self[item] = ia
+        return ia 
 
 
 class OffAxisProjectionFixedResolutionBuffer(FixedResolutionBuffer):
@@ -457,7 +458,8 @@
                                    weight=ds.weight_field, volume=ds.volume,
                                    no_ghost=ds.no_ghost, interpolated=ds.interpolated,
                                    north_vector=ds.north_vector)
-        self[item] = buff.swapaxes(0,1)
-        return buff
+        ia = ImageArray(buff.swapaxes(0,1), info=self._get_info(item))
+        self[item] = ia
+        return ia 
 
 


diff -r 2c1073f374f88915370e9354472e3c4d75253bd2 -r 73d07ed5ec643a97f2b8c2b40c6b9514bc8b7304 yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -420,7 +420,7 @@
     else:
         dpi = None
 
-    suffix = os.path.splitext(filename)[1]
+    suffix = get_image_suffix(filename)
 
     if suffix == '':
         suffix = '.png'


diff -r 2c1073f374f88915370e9354472e3c4d75253bd2 -r 73d07ed5ec643a97f2b8c2b40c6b9514bc8b7304 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -630,6 +630,9 @@
         y0, y1 = plot.ylim
         xx0, xx1 = plot._axes.get_xlim()
         yy0, yy1 = plot._axes.get_ylim()
+
+        extent = [xx0,xx1,yy0,yy1]
+
         plot._axes.hold(True)
 
         px_index = x_dict[plot.data.axis]
@@ -659,7 +662,7 @@
                              (x0, x1, y0, y1), 0).transpose()
             buff = np.maximum(temp, buff)
         self.rv = plot._axes.contour(buff, len(self.clumps)+1,
-                                     **self.plot_args)
+                                     extent=extent,**self.plot_args)
         plot._axes.hold(False)
 
 class ArrowCallback(PlotCallback):
@@ -806,6 +809,7 @@
 
     def __call__(self, plot):
         from matplotlib.patches import Circle
+        num = len(self.hop_output[:self.max_number])
         for halo in self.hop_output[:self.max_number]:
             size = halo.get_size()
             if size < self.min_size or size > self.max_size: continue
@@ -822,18 +826,19 @@
             (xi, yi) = (x_dict[plot.data.axis], y_dict[plot.data.axis])
 
             (center_x,center_y) = self.convert_to_plot(plot,(center[xi], center[yi]))
-            cir = Circle((center_x, center_y), radius, fill=False)
+            color = np.ones(3) * (0.4 * (num - halo.id)/ num) + 0.6
+            cir = Circle((center_x, center_y), radius, fill=False, color=color)
             plot._axes.add_patch(cir)
             if self.annotate:
                 if self.print_halo_size:
-                    plot._axes.text(center_x, center_y, "%s" % size,
-                    fontsize=self.font_size)
+                    plot._axes.text(center_x+radius, center_y+radius, "%s" % size,
+                    fontsize=self.font_size, color=color)
                 elif self.print_halo_mass:
-                    plot._axes.text(center_x, center_y, "%s" % halo.total_mass(),
-                    fontsize=self.font_size)
+                    plot._axes.text(center_x+radius, center_y+radius, "%s" % halo.total_mass(),
+                    fontsize=self.font_size, color=color)
                 else:
-                    plot._axes.text(center_x, center_y, "%s" % halo.id,
-                    fontsize=self.font_size)
+                    plot._axes.text(center_x+radius, center_y+radius, "%s" % halo.id,
+                    fontsize=self.font_size, color=color)
 
 class HopParticleCallback(PlotCallback):
     _type_name = "hop_particles"
@@ -1167,10 +1172,11 @@
           }
     _bbox_dict = {'boxstyle': 'square,pad=0.6', 'fc': 'white', 'ec': 'black', 'alpha': 1.0}
 
-    def __init__(self, x, y, units=None, format="{time:.3G} {units}", normalized = False, 
-                 bbox_dict = None, **kwargs):
+    def __init__(self, x, y, units=None, format="{time:.3G} {units}", normalized=False, 
+                 bbox_dict=None, **kwargs):
         """ 
-        annotate_timestamp(x, y, units=None, format="{time:.3G} {units}", **kwargs)
+        annotate_timestamp(x, y, units=None, format="{time:.3G} {units}",
+                           normalized=False, bbox_dict=None, **kwargs)
 
         Adds the current time to the plot at point given by *x* and *y*.  If *units* 
         is given ('s', 'ms', 'ns', etc), it will convert the time to this basis.  If 

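Note: usage sketch for the two documented keywords, assuming p is an existing slice
or projection plot:

    p.annotate_timestamp(0.05, 0.95, normalized=True, units="Myr",
                         bbox_dict={"boxstyle": "round,pad=0.6",
                                    "fc": "white", "ec": "black"})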

diff -r 2c1073f374f88915370e9354472e3c4d75253bd2 -r 73d07ed5ec643a97f2b8c2b40c6b9514bc8b7304 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -27,10 +27,20 @@
 import base64
 import matplotlib.figure
 from matplotlib.mathtext import MathTextParser
+from distutils import version
+import matplotlib
+
+# Some magic for dealing with pyparsing being included or not
+# included in matplotlib (not in gentoo, yes in everything else)
+# Also accounting for the fact that in 1.2.0, pyparsing got renamed.
 try:
-    from matplotlib.pyparsing import ParseFatalException
+    if version.LooseVersion(matplotlib.__version__) < version.LooseVersion("1.2.0"):
+        from matplotlib.pyparsing import ParseFatalException
+    else:
+        from matplotlib.pyparsing_py2 import ParseFatalException
 except ImportError:
     from pyparsing import ParseFatalException
+
 import cStringIO
 import types
 import __builtin__
@@ -136,53 +146,35 @@
 log_transform = FieldTransform('log10', np.log10, LogLocator())
 linear_transform = FieldTransform('linear', lambda x: x, LinearLocator())
 
-def GetBoundsAndCenter(axis, center, width, pf, unit='1'):
-    if width == None:
-        width = (pf.domain_width[x_dict[axis]],
-                 pf.domain_width[y_dict[axis]])
+def StandardWidth(axis, width, depth, pf):
+    if width is None:
+        # Default to code units
+        if not iterable(axis):
+            width = ((pf.domain_width[x_dict[axis]], '1'),
+                     (pf.domain_width[y_dict[axis]], '1'))
+        else:
+            # axis is actually the normal vector
+            # for an off-axis data object.
+            width = ((pf.domain_width.min(), '1'),
+                     (pf.domain_width.min(), '1'))
     elif iterable(width): 
-        if isinstance(width[1],str):
-            w,unit = width
-            width = (w, w)
-        elif isinstance(width[1],tuple):
-            wx,unitx = width[0]
-            wy,unity = width[1]
-            width = (wx/pf[unitx],wy/pf[unity])
+        if isinstance(width[1], str):
+            width = (width, width)
+        elif isinstance(width[1], tuple):
+            pass
     else:
-        width = (width, width)
-    Wx, Wy = width
-    width = (Wx/pf[unit], Wy/pf[unit])
-    if isinstance(center,str):
-        if center.lower() == 'm' or center.lower() == 'max':
-            v, center = pf.h.find_max("Density")
-        elif center.lower() == "center" or center.lower() == "c":
-            center = (pf.domain_right_edge + pf.domain_left_edge)/2.0
-        else:
-            raise RuntimeError('center keyword \"%s\" not recognized'%center)
-    bounds = [center[x_dict[axis]]-width[0]/2,
-              center[x_dict[axis]]+width[0]/2,
-              center[y_dict[axis]]-width[1]/2,
-              center[y_dict[axis]]+width[1]/2]
-    return (bounds,center)
-
-def GetOffAxisBoundsAndCenter(normal, center, width, pf, unit='1',depth=None):
-    if width == None:
-        width = (pf.domain_width.min(),
-                 pf.domain_width.min())
-    elif iterable(width) and isinstance(width[1],str):
-        w,unit = width
-        width = w
-    if not iterable(width):
-        width = (width, width)
-    Wx, Wy = width
-    width = np.array((Wx/pf[unit], Wy/pf[unit]))
-    if depth != None:
-        if iterable(depth) and isinstance(depth[1],str):
-            d,unit = depth
-            depth = d/pf[unit]
+        width = ((width, '1'), (width, '1'))
+    if depth is not None:
+        if iterable(depth) and isinstance(depth[1], str):
+            depth = (depth,)
         elif iterable(depth):
             raise RuntimeError("Depth must be a float or a (width,\"unit\") tuple")
-        width = np.append(width,depth)
+        else:
+            depth = ((depth, '1'),)
+        width += depth
+    return width
+
+def StandardCenter(center, pf):
     if isinstance(center,str):
         if center.lower() == 'm' or center.lower() == 'max':
             v, center = pf.h.find_max("Density")
@@ -190,21 +182,39 @@
             center = (pf.domain_left_edge + pf.domain_right_edge) / 2
         else:
             raise RuntimeError('center keyword \"%s\" not recognized'%center)
+    return center
 
-    if width.shape == (2,):
+def GetWindowParameters(axis, center, width, pf):
+    width = StandardWidth(axis, width, None, pf)
+    center = StandardCenter(center, pf)
+    units = (width[0][1], width[1][1])
+    bounds = (center[x_dict[axis]]-width[0][0]/pf[units[0]]/2,  
+              center[x_dict[axis]]+width[0][0]/pf[units[0]]/2, 
+              center[y_dict[axis]]-width[1][0]/pf[units[1]]/2, 
+              center[y_dict[axis]]+width[1][0]/pf[units[1]]/2)
+    return (bounds, center, units)
+
+def GetObliqueWindowParameters(normal, center, width, pf, depth=None):
+    width = StandardWidth(normal, width, depth, pf)
+    center = StandardCenter(center, pf)
+
+    if len(width) == 2:
         # Transforming to the cutting plane coordinate system
         center = np.array(center)
         center = (center - pf.domain_left_edge)/pf.domain_width - 0.5
         (normal,perp1,perp2) = ortho_find(normal)
         mat = np.transpose(np.column_stack((perp1,perp2,normal)))
         center = np.dot(mat,center)
-        width = width
     
-        bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2]
+        units = (width[0][1], width[1][1])
+        bounds = (-width[0][0]/pf[units[0]]/2, width[0][0]/pf[units[0]]/2, 
+                  -width[1][0]/pf[units[1]]/2, width[1][0]/pf[units[1]]/2)
     else:
-        bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2, -width[2]/2, width[2]/2]
-
-    return (bounds,center)
+        units = (width[0][1], width[1][1], width[2][1])
+        bounds = (-width[0][0]/pf[units[0]]/2, width[0][0]/pf[units[0]]/2, 
+                  -width[1][0]/pf[units[1]]/2, width[1][0]/pf[units[1]]/2, 
+                  -width[2][0]/pf[units[2]]/2, width[2][0]/pf[units[2]]/2)
+    return (bounds, center, units)
 
 class PlotWindow(object):
     _plot_valid = False
@@ -213,7 +223,7 @@
     _vector_info = None
     _frb = None
     def __init__(self, data_source, bounds, buff_size=(800,800), antialias=True, 
-                 periodic=True, origin='center-window', oblique=False):
+                 periodic=True, origin='center-window', oblique=False, fontsize=15):
         r"""
         PlotWindow(data_source, bounds, buff_size=(800,800), antialias = True)
         
@@ -243,6 +253,10 @@
             rendering is used during data deposition.
 
         """
+        if not hasattr(self, "pf"):
+            self.pf = data_source.pf
+            ts = self._initialize_dataset(self.pf) 
+            self.ts = ts
         self._initfinished = False
         self.center = None
         self.plots = {}
@@ -250,9 +264,10 @@
         self.oblique = oblique
         self.data_source = data_source
         self.buff_size = buff_size
-        self.antialias = True
+        self.antialias = antialias
         self.set_window(bounds) # this automatically updates the data and plot
         self.origin = origin
+        self.fontsize = fontsize
         if self.data_source.center is not None and oblique == False:
             center = [self.data_source.center[i] for i in range(len(self.data_source.center)) if i != self.data_source.axis]
             self.set_center(center)
@@ -420,42 +435,43 @@
              wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window 
              that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along 
              the y axis.  In the other two examples, code units are assumed, for example
-             (0.2, 0.3) requests a plot that has and x width of 0.2 and a y width of 0.3 
-             in code units.  the width of the image.
+             (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3 
+             in code units.  If units are provided the resulting plot axis labels will  
+             use the supplied units.
         unit : str
-            the unit the width has been specified in.
-            defaults to code units.  If width is a tuple this 
-            argument is ignored
+             the unit the width has been specified in.
+             defaults to code units.  If width is a tuple this 
+             argument is ignored
 
         """
-        if iterable(width): 
-            if isinstance(width[1],str):
-                w, unit = width
-                width = (w, w)
-            elif isinstance(width[1], tuple):
-                wx,unitx = width[0]
-                wy,unity = width[1]
-                width = (wx/self.pf[unitx],wy/self.pf[unity])
+        if width is not None:
+            set_axes_unit = True
         else:
-            width = (width, width)
-        Wx, Wy = width
-        width = (Wx,Wy)
-        width = [w / self.pf[unit] for w in width]
+            set_axes_unit = False
+
+        width = StandardWidth(self._frb.axis, width, None, self.pf)
 
         centerx = (self.xlim[1] + self.xlim[0])/2.
         centery = (self.ylim[1] + self.ylim[0])/2. 
         
-        self.xlim = (centerx - width[0]/2.,
-                     centerx + width[0]/2.)
-        self.ylim = (centery - width[1]/2.,
-                     centery + width[1]/2.)
+        units = (width[0][1], width[1][1])
+
+        if set_axes_unit:
+            self._axes_unit_names = units
+        else:
+            self._axes_unit_names = None
+
+        self.xlim = (centerx - width[0][0]/self.pf[units[0]]/2.,
+                     centerx + width[0][0]/self.pf[units[0]]/2.)
+        self.ylim = (centery - width[1][0]/self.pf[units[1]]/2.,
+                     centery + width[1][0]/self.pf[units[1]]/2.)
         
         if hasattr(self,'zlim'):
             centerz = (self.zlim[1] + self.zlim[0])/2.
-            mw = max(width)
+            mw = max([width[0][0]/self.pf[units[0]], width[1][0]/self.pf[units[1]]])
             self.zlim = (centerz - mw/2.,
                          centerz + mw/2.)
-        
+
     @invalidate_data
     def set_center(self, new_center, unit = '1'):
         """Sets a new center for the plot window
@@ -512,7 +528,7 @@
     def __init__(self, *args,**kwargs):
         setup = kwargs.pop("setup", True)
         PlotWindow.__init__(self, *args,**kwargs)
-        self._unit = None
+        self._axes_unit_names = None
         self._callbacks = []
         self._field_transform = {}
         self._colormaps = defaultdict(lambda: 'algae')
@@ -640,12 +656,14 @@
 
         Parameters
         ----------
-        unit_name : string
+        unit_name : string or two-element tuple of strings
             A unit, available for conversion in the parameter file, that the
             image extents will be displayed in.  If set to None, any previous
             units will be reset.  If the unit is None, the default is chosen.
             If unit_name is '1', 'u', or 'unitary', it will not display the 
-            units, and only show the axes name.
+            units, and only show the axes name. If unit_name is a tuple, the first
+            element is assumed to be the unit for the x axis and the second element
+            the unit for the y axis.
 
         Raises
         ------
@@ -663,12 +681,13 @@
         >>> p.show()
         """
         # blind except because it could be in conversion_factors or units
-        try:
-            self.pf[unit_name]
-        except KeyError: 
-            if unit_name is not None:
-                raise YTUnitNotRecognized(unit_name)
-        self._unit = unit_name
+        if unit_name is not None:
+            if isinstance(unit_name, basestring):
+                unit_name = (unit_name, unit_name)
+            for un in unit_name:
+                try:
+                    self.pf[un]
+                except KeyError: 
+                    raise YTUnitNotRecognized(un)
+        self._axes_unit_names = unit_name
 
     def get_metadata(self, field, strip_mathml = True, return_string = True):
         fval = self._frb[field]
@@ -676,10 +695,11 @@
         ma = fval.max()
         x_width = self.xlim[1] - self.xlim[0]
         y_width = self.ylim[1] - self.ylim[0]
-        if self._unit is None:
+        if self._axes_unit_names is None:
             unit = get_smallest_appropriate_unit(x_width, self.pf)
+            unit = (unit, unit)
         else:
-            unit = self._unit
+            unit = self._axes_unit_names
         units = self.get_field_units(field, strip_mathml)
         center = getattr(self._frb.data_source, "center", None)
         if center is None or self._frb.axis == 4:
@@ -693,16 +713,16 @@
         if return_string:
             md = _metadata_template % dict(
                 pf = self.pf,
-                x_width = x_width*self.pf[unit],
-                y_width = y_width*self.pf[unit],
-                unit = unit, units = units, mi = mi, ma = ma,
-                xc = xc, yc = yc, zc = zc)
+                x_width = x_width*self.pf[unit[0]],
+                y_width = y_width*self.pf[unit[1]],
+                axes_unit_names = unit[0], colorbar_unit = units, 
+                mi = mi, ma = ma, xc = xc, yc = yc, zc = zc)
         else:
             md = dict(pf = self.pf,
-                      x_width = x_width*self.pf[unit],
-                      y_width = y_width*self.pf[unit],
-                      unit = unit, units = units, mi = mi, ma = ma,
-                      xc = xc, yc = yc, zc = zc)
+                      x_width = x_width*self.pf[unit[0]],
+                      y_width = y_width*self.pf[unit[1]],
+                      axes_unit_names = unit, colorbar_unit = units, 
+                      mi = mi, ma = ma, xc = xc, yc = yc, zc = zc)
         return md
 
     def get_field_units(self, field, strip_mathml = True):
@@ -733,68 +753,12 @@
     _plot_type = None
 
     def __init__(self, *args, **kwargs):
-        if self._frb_generator == None:
+        if self._frb_generator is None:
             self._frb_generator = kwargs.pop("frb_generator")
-        if self._plot_type == None:
+        if self._plot_type is None:
             self._plot_type = kwargs.pop("plot_type")
         PWViewer.__init__(self, *args, **kwargs)
 
-    def _setup_origin(self):
-        origin = self.origin
-        axis_index = self.data_source.axis
-        if isinstance(origin, basestring):
-            origin = tuple(origin.split('-'))[:3]
-        if 1 == len(origin):
-            origin = ('lower', 'left') + origin
-        elif 2 == len(origin) and origin[0] in set(['left','right','center']):
-            o0map = {'left': 'lower', 'right': 'upper', 'center': 'center'}
-            origin = (o0map[origin[0]],) + origin
-        elif 2 == len(origin) and origin[0] in set(['lower','upper','center']):
-            origin = (origin[0], 'center', origin[-1])
-        assert origin[-1] in ['window', 'domain']
-
-        if origin[2] == 'window':
-            xllim, xrlim = self.xlim
-            yllim, yrlim = self.ylim
-        elif origin[2] == 'domain':
-            xllim = self.pf.domain_left_edge[x_dict[axis_index]]
-            xrlim = self.pf.domain_right_edge[x_dict[axis_index]]
-            yllim = self.pf.domain_left_edge[y_dict[axis_index]]
-            yrlim = self.pf.domain_right_edge[y_dict[axis_index]]
-        else:
-            mylog.warn("origin = {0}".format(origin))
-            msg = ('origin keyword "{0}" not recognized, must declare "domain" '
-                   'or "center" as the last term in origin.').format(self.origin)
-            raise RuntimeError(msg)
-
-        if origin[0] == 'lower':
-            yc = yllim
-        elif origin[0] == 'upper':
-            yc = yrlim
-        elif origin[0] == 'center':
-            yc = (yllim + yrlim)/2.0
-        else:
-            mylog.warn("origin = {0}".format(origin))
-            msg = ('origin keyword "{0}" not recognized, must declare "lower" '
-                   '"upper" or "center" as the first term in origin.')
-            msg = msg.format(self.origin)
-            raise RuntimeError(msg)
-
-        if origin[1] == 'left':
-            xc = xllim
-        elif origin[1] == 'right':
-            xc = xrlim
-        elif origin[1] == 'center':
-            xc = (xllim + xrlim)/2.0
-        else:
-            mylog.warn("origin = {0}".format(origin))
-            msg = ('origin keyword "{0}" not recognized, must declare "left" '
-                   '"right" or "center" as the second term in origin.')
-            msg = msg.format(self.origin)
-            raise RuntimeError(msg)
-
-        return xc, yc
-
     def _setup_plots(self):
         if self._current_field is not None:
             fields = [self._current_field]
@@ -805,48 +769,69 @@
             md = self.get_metadata(f, strip_mathml = False, return_string = False)
             axis_index = self.data_source.axis
 
-            xc, yc = self._setup_origin()
+            if self.origin == 'center-window':
+                xc = (self.xlim[0]+self.xlim[1])/2
+                yc = (self.ylim[0]+self.ylim[1])/2
+            elif self.origin == 'center-domain':
+                xc = (self.pf.domain_left_edge[x_dict[axis_index]]+
+                      self.pf.domain_right_edge[x_dict[axis_index]])/2
+                yc = (self.pf.domain_left_edge[y_dict[axis_index]]+
+                      self.pf.domain_right_edge[y_dict[axis_index]])/2
+            elif self.origin == 'left-domain':
+                xc = self.pf.domain_left_edge[x_dict[axis_index]]
+                yc = self.pf.domain_left_edge[y_dict[axis_index]]
+            else:
+                raise RuntimeError(
+                    'origin keyword: \"%(k)s\" not recognized' % {'k': self.origin})
 
-            extent = [self.xlim[i] - xc for i in (0,1)]
-            extent.extend([self.ylim[i] - yc for i in (0,1)])
-            extent = [el*self.pf[md['unit']] for el in extent]
+            (unit_x, unit_y) = md['axes_unit_names']
+
+            extentx = [(self.xlim[i] - xc) * self.pf[unit_x] for i in (0,1)]
+            extenty = [(self.ylim[i] - yc) * self.pf[unit_y] for i in (0,1)]
+
+            extent = extentx + extenty
 
             if f in self.plots.keys():
-                zlim = (self.plots[f].zmin,self.plots[f].zmax)
+                zlim = (self.plots[f].zmin, self.plots[f].zmax)
             else:
-                zlim = (None,None)
+                zlim = (None, None)
 
-            aspect = (self.xlim[1] - self.xlim[0])/(self.ylim[1]-self.ylim[0])
-
+            plot_aspect = (self.xlim[1] - self.xlim[0]) / (self.ylim[1] - self.ylim[0])
+            
             # This sets the size of the figure, and defaults to making one of the dimensions smaller.
             # This should protect against giant images in the case of a very large aspect ratio.
             norm_size = 10.0
             cbar_frac = 0.0
-            if aspect > 1.0:
-                size = (norm_size*(1.+cbar_frac), norm_size/aspect)
+            if plot_aspect > 1.0:
+                size = (norm_size*(1.+cbar_frac), norm_size/plot_aspect)
             else:
-                size = (aspect*norm_size*(1.+cbar_frac), norm_size)
+                size = (plot_aspect*norm_size*(1.+cbar_frac), norm_size)
 
-            self.plots[f] = WindowPlotMPL(self._frb[f], extent, self._field_transform[f], 
+            # Correct the aspect ratio in case unit_x and unit_y are different
+            aspect = self.pf[unit_x]/self.pf[unit_y]
+            
+            self.plots[f] = WindowPlotMPL(self._frb[f], extent, aspect, self._field_transform[f], 
                                           self._colormaps[f], size, zlim)
+
             self.plots[f].cb = self.plots[f].figure.colorbar(
                 self.plots[f].image, cax = self.plots[f].cax)
 
-            if not md['unit'] in ['1', 'u', 'unitary']:
-                axes_unit_label = '\/\/('+md['unit']+')'
+            axes_unit_labels = ['', '']
+            for i, un in enumerate((unit_x, unit_y)):
+                if un not in ['1', 'u', 'unitary']:
+                    axes_unit_labels[i] = '\/\/('+un+')'
+                    
+            if self.oblique:
+                labels = [r'$\rm{Image\/x'+axes_unit_labels[0]+'}$',
+                          r'$\rm{Image\/y'+axes_unit_labels[1]+'}$']
             else:
-                axes_unit_label = ''
+                labels = [r'$\rm{'+axis_labels[axis_index][i]+
+                          axes_unit_labels[i] + r'}$' for i in (0,1)]
 
-            if self.oblique == False:
-                labels = [r'$\rm{'+axis_labels[axis_index][i]+
-                        axes_unit_label + r'}$' for i in (0,1)]
-            else:
-                labels = [r'$\rm{Image\/x'+axes_unit_label+'}$',
-                          r'$\rm{Image\/y'+axes_unit_label+'}$']
+            self.plots[f].axes.set_xlabel(labels[0], fontsize=self.fontsize)
+            self.plots[f].axes.set_ylabel(labels[1], fontsize=self.fontsize)
 
-            self.plots[f].axes.set_xlabel(labels[0])
-            self.plots[f].axes.set_ylabel(labels[1])
-
+            self.plots[f].axes.tick_params(labelsize=self.fontsize)
             ftype, fname = f
             field_name = self.data_source.pf._get_field_info(ftype, fname).display_name
 
@@ -861,16 +846,18 @@
             except ParseFatalException, err:
                 raise YTCannotParseFieldDisplayName(fname,field_name,str(err))
 
-            if md['units'] is None or md['units'] == '':
+            if md['colorbar_unit'] is None or md['colorbar_unit'] == '':
                 label = field_name
             else:
                 try:
-                    parser.parse(r'$'+md['units']+r'$')
+                    parser.parse(r'$'+md['colorbar_unit']+r'$')
                 except ParseFatalException, err:
-                    raise YTCannotParseUnitDisplayName(f, md['units'],str(err))
-                label = field_name+r'$\/\/('+md['units']+r')$'
+                    raise YTCannotParseUnitDisplayName(f, md['colorbar_unit'], str(err))
+                label = field_name+r'$\/\/('+md['colorbar_unit']+r')$'
 
-            self.plots[f].cb.set_label(label)
+            self.plots[f].cb.set_label(label, fontsize=self.fontsize)
+
+            self.plots[f].cb.ax.tick_params(labelsize=self.fontsize)
 
             self.run_callbacks(f)
 
@@ -933,9 +920,9 @@
         """
         names = []
         if mpl_kwargs is None: mpl_kwargs = {}
-        if name == None:
+        if name is None:
             name = str(self.pf)
-        suffix = os.path.splitext(name)[1]
+        suffix = get_image_suffix(name)
         if suffix != '':
             for k, v in self.plots.iteritems():
                 names.append(v.save(name,mpl_kwargs))
@@ -1007,7 +994,7 @@
     _frb_generator = FixedResolutionBuffer
 
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
-                 origin='center-window'):
+                 origin='center-window', fontsize=15):
         r"""Creates a slice plot from a parameter file
         
         Given a pf object, an axis to slice along, and a field name
@@ -1050,39 +1037,22 @@
              wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window 
              that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along 
              the y axis.  In the other two examples, code units are assumed, for example
-             (0.2, 0.3) requests a plot that has and x width of 0.2 and a y width of 0.3 
-             in code units.  
+             (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3 
+             in code units.  If units are provided, the resulting plot axis labels will
+             use the supplied units.
         axes_unit : A string
-            The name of the unit for the tick labels on the x and y axes.  
-            Defaults to None, which automatically picks an appropriate unit.
-            If axes_unit is '1', 'u', or 'unitary', it will not display the 
-            units, and only show the axes name.
-        origin : string or length 1, 2, or 3 sequence of strings
-             The location of the origin of the plot coordinate system.  This is 
-             represented by '-' separated string or a tuple of strings.  In the
-             first index the y-location is given by 'lower', 'upper', or 'center'.
-             The second index is the x-location, given as 'left', 'right', or 
-             'center'.  Finally, the whether the origin is applied in 'domain' space
-             or plot 'window' space is given. For example, both 'upper-right-domain'
-             and ['upper', 'right', 'domain'] both place the origin in the upper
-             right hand corner of domain space. If x or y are not given, a value is 
-             inffered.  For instance, 'left-domain' corresponds to the lower-left 
-             hand corner of the simulation domain, 'center-domain' corresponds to the 
-             center of the simulation domain, or 'center-window' for the center of 
-             the plot window.  Further examples:
-
-             ==================================     ============================
-             format                                 example                
-             ==================================     ============================
-             '{space}'                              'domain'
-             '{xloc}-{space}'                       'left-window'
-             '{yloc}-{space}'                       'upper-domain'
-             '{yloc}-{xloc}-{space}'                'lower-right-window'
-             ('{space}',)                           ('window',)
-             ('{xloc}', '{space}')                  ('right', 'domain')
-             ('{yloc}', '{space}')                  ('lower', 'window')
-             ('{yloc}', '{xloc}', '{space}')        ('lower', 'right', 'window')
-             ==================================     ============================
+             The name of the unit for the tick labels on the x and y axes.  
+             Defaults to None, which automatically picks an appropriate unit.
+             If axes_unit is '1', 'u', or 'unitary', it will not display the 
+             units, and only show the axes name.
+        origin : string
+             The location of the origin of the plot coordinate system.
+             Currently, it can be set to three options: 'left-domain', corresponding
+             to the bottom-left hand corner of the simulation domain, 'center-domain',
+             corresponding to the center of the simulation domain, or 'center-window' for
+             the center of the plot window.
+        fontsize : integer
+             The size of the fonts for the axis, colorbar, and tick labels.
              
         Examples
         --------
@@ -1094,12 +1064,14 @@
         >>> p.save('sliceplot')
         
         """
-        # tHis will handle time series data and controllers
+        # this will handle time series data and controllers
         ts = self._initialize_dataset(pf) 
         self.ts = ts
         pf = self.pf = ts[0]
         axis = fix_axis(axis)
-        (bounds, center) = GetBoundsAndCenter(axis, center, width, pf)
+        (bounds, center, units) = GetWindowParameters(axis, center, width, pf)
+        if axes_unit is None and units != ('1', '1'):
+            axes_unit = units
         slc = pf.h.slice(axis, center[axis])
         slc.get_data(fields)
         PWViewerMPL.__init__(self, slc, bounds, origin=origin)
@@ -1110,7 +1082,7 @@
     _frb_generator = FixedResolutionBuffer
 
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
-                 weight_field=None, max_level=None, origin='center-window'):
+                 weight_field=None, max_level=None, origin='center-window', fontsize=15):
         r"""Creates a projection plot from a parameter file
         
         Given a pf object, an axis to project along, and a field name
@@ -1153,44 +1125,26 @@
              wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window 
              that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along 
              the y axis.  In the other two examples, code units are assumed, for example
-             (0.2, 0.3) requests a plot that has and x width of 0.2 and a y width of 0.3 
-             in code units.
+             (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3 
+             in code units.  If units are provided, the resulting plot axis labels will
+             use the supplied units.
         axes_unit : A string
-            The name of the unit for the tick labels on the x and y axes.  
-            Defaults to None, which automatically picks an appropriate unit.
-            If axes_unit is '1', 'u', or 'unitary', it will not display the 
-            units, and only show the axes name.
-        origin : string or length 1, 2, or 3 sequence of strings
-             The location of the origin of the plot coordinate system.  This is 
-             represented by '-' separated string or a tuple of strings.  In the
-             first index the y-location is given by 'lower', 'upper', or 'center'.
-             The second index is the x-location, given as 'left', 'right', or 
-             'center'.  Finally, the whether the origin is applied in 'domain' space
-             or plot 'window' space is given. For example, both 'upper-right-domain'
-             and ['upper', 'right', 'domain'] both place the origin in the upper
-             right hand corner of domain space. If x or y are not given, a value is 
-             inffered.  For instance, 'left-domain' corresponds to the lower-left 
-             hand corner of the simulation domain, 'center-domain' corresponds to the 
-             center of the simulation domain, or 'center-window' for the center of 
-             the plot window.Further examples:
-
-             ==================================     ============================
-             format                                 example
-             ==================================     ============================ 
-             '{space}'                              'domain'
-             '{xloc}-{space}'                       'left-window'
-             '{yloc}-{space}'                       'upper-domain'
-             '{yloc}-{xloc}-{space}'                'lower-right-window'
-             ('{space}',)                           ('window',)
-             ('{xloc}', '{space}')                  ('right', 'domain')
-             ('{yloc}', '{space}')                  ('lower', 'window')
-             ('{yloc}', '{xloc}', '{space}')        ('lower', 'right', 'window')
-             ==================================     ============================
-             
+             The name of the unit for the tick labels on the x and y axes.  
+             Defaults to None, which automatically picks an appropriate unit.
+             If axes_unit is '1', 'u', or 'unitary', it will not display the 
+             units, and only show the axes name.
+        origin : A string
+             The location of the origin of the plot coordinate system.
+             Currently, it can be set to three options: 'left-domain', corresponding
+             to the bottom-left hand corner of the simulation domain, 'center-domain',
+             corresponding to the center of the simulation domain, or 'center-window' for
+             the center of the plot window.
         weight_field : string
-            The name of the weighting field.  Set to None for no weight.
+             The name of the weighting field.  Set to None for no weight.
         max_level: int
-            The maximum level to project to.
+             The maximum level to project to.
+        fontsize : integer
+             The size of the fonts for the axis, colorbar, and tick labels.
         
         Examples
         --------
@@ -1206,8 +1160,10 @@
         self.ts = ts
         pf = self.pf = ts[0]
         axis = fix_axis(axis)
-        (bounds, center) = GetBoundsAndCenter(axis, center, width, pf)
-        proj = pf.h.proj(fields, axis, weight_field=weight_field, center=center)
+        (bounds, center, units) = GetWindowParameters(axis, center, width, pf)
+        if axes_unit is None and units != ('1', '1'):
+            axes_unit = units
+        proj = pf.h.proj(fields, axis, weight_field=weight_field, max_level=max_level, center=center)
+        PWViewerMPL.__init__(self, proj, bounds, origin=origin)
         self.set_axes_unit(axes_unit)
 
@@ -1215,8 +1171,8 @@
     _plot_type = 'OffAxisSlice'
     _frb_generator = ObliqueFixedResolutionBuffer
 
-    def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
-                 axes_unit=None, north_vector=None):
+    def __init__(self, pf, normal, fields, center='c', width=None, 
+                 axes_unit=None, north_vector=None, fontsize=15):
         r"""Creates an off axis slice plot from a parameter file
 
         Given a pf object, a normal vector defining a slicing plane, and
@@ -1253,10 +1209,14 @@
             A vector defining the 'up' direction in the plot.  This
             option sets the orientation of the slicing plane.  If not
             set, an arbitrary grid-aligned north-vector is chosen.
-
+        fontsize : integer
+             The size of the fonts for the axis, colorbar, and tick labels.
         """
-        (bounds,center_rot) = GetOffAxisBoundsAndCenter(normal,center,width,pf)
-        cutting = pf.h.cutting(normal,center,fields=fields,north_vector=north_vector)
+        (bounds, center_rot, units) = GetObliqueWindowParameters(normal, center, width, pf)
+        if axes_unit is None and units != ('1', '1'):
+            axes_unit = units
+        cutting = pf.h.cutting(normal, center, north_vector=north_vector)
+        cutting.get_data(fields)
         # Hard-coding the origin keyword since the other two options
         # aren't well-defined for off-axis data objects
         PWViewerMPL.__init__(self,cutting,bounds,origin='center-window',periodic=False,oblique=True)
@@ -1290,9 +1250,9 @@
     _frb_generator = OffAxisProjectionFixedResolutionBuffer
 
     def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
-                 depth=(1,'unitary'), axes_unit=None, weight_field=None, 
+                 depth=(1,'1'), axes_unit=None, weight_field=None, 
                  max_level=None, north_vector=None, volume=None, no_ghost=False, 
-                 le=None, re=None, interpolated=False):
+                 le=None, re=None, interpolated=False, fontsize=15):
         r"""Creates an off axis projection plot from a parameter file
 
         Given a pf object, a normal vector to project along, and
@@ -1339,8 +1299,9 @@
             set, an arbitrary grid-aligned north-vector is chosen.
 
         """
-        (bounds,center_rot) = GetOffAxisBoundsAndCenter(normal,center,width,pf,depth=depth)
-        # Hard-coding the resolution for now
+        (bounds, center_rot, units) = GetObliqueWindowParameters(normal, center, width, pf, depth=depth)
+        if axes_unit is None and units != ('1', '1', '1'):
+            axes_unit = units[:2]
         fields = ensure_list(fields)[:]
         width = np.array((bounds[1] - bounds[0], bounds[3] - bounds[2], bounds[5] - bounds[4]))
         OffAxisProj = OffAxisProjectionDummyDataSource(center_rot, pf, normal, width, fields, interpolated,
@@ -1354,9 +1315,9 @@
 _metadata_template = """
 %(pf)s<br><br>
-Field of View:  %(x_width)0.3f %(unit)s<br>
-Minimum Value:  %(mi)0.3e %(units)s<br>
-Maximum Value:  %(ma)0.3e %(units)s<br>
+Field of View:  %(x_width)0.3f %(axes_unit_names)s<br>
+Minimum Value:  %(mi)0.3e %(colorbar_unit)s<br>
+Maximum Value:  %(ma)0.3e %(colorbar_unit)s<br>
 Central Point:  (data coords)<br>
    %(xc)0.14f<br>
    %(yc)0.14f<br>
@@ -1550,7 +1511,7 @@
             self.cax = self.figure.add_axes(caxrect)
             
     def save(self, name, mpl_kwargs, canvas = None):
-        suffix = os.path.splitext(name)[1]
+        suffix = get_image_suffix(name)
         
         if suffix == '':
             suffix = '.png'
@@ -1616,18 +1577,17 @@
         return f.read()
 
 class WindowPlotMPL(PlotMPL):
-    def __init__(self, data, extent, field_transform, cmap, size, zlim):
+    def __init__(self, data, extent, aspect, field_transform, cmap, size, zlim):
         self.zmin, self.zmax = zlim
         PlotMPL.__init__(self, data, size)
-        self.__init_image(data, extent, field_transform, cmap)
+        self.__init_image(data, extent, aspect, field_transform, cmap)
 
-    def __init_image(self, data, extent, field_transform, cmap):
+    def __init_image(self, data, extent, aspect, field_transform, cmap):
         if (field_transform.name == 'log10'):
             norm = matplotlib.colors.LogNorm()
         elif (field_transform.name == 'linear'):
             norm = matplotlib.colors.Normalize()
-        self.image = self.axes.imshow(data, origin='lower', extent = extent,
-                                      norm = norm, vmin = self.zmin, 
-                                      vmax = self.zmax, cmap = cmap)
-        self.image.axes.ticklabel_format(scilimits=(-4,3))
-
+        self.image = self.axes.imshow(data, origin='lower', extent=extent,
+                                      norm=norm, vmin=self.zmin, aspect=aspect, 
+                                      vmax=self.zmax, cmap=cmap)
+        self.image.axes.ticklabel_format(scilimits=(-2,3))
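
For readers following the plot_window changes above, a minimal usage
sketch of the new per-axis width/unit handling and the fontsize keyword
(the dataset name is a placeholder; any supported output should work):

    from yt.mods import load, SlicePlot

    pf = load("RedshiftOutput0005")  # hypothetical dataset name

    # Each axis of the window can now carry its own width and unit, and
    # the units propagate to the plot's axis labels.
    p = SlicePlot(pf, 'x', 'Density',
                  width=((10, 'kpc'), (15, 'kpc')), fontsize=15)

    # Units can also be set per axis after construction; '1', 'u', or
    # 'unitary' suppresses the unit and shows only the axis name.
    p.set_axes_unit(('kpc', 'kpc'))
    p.save('slice')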





https://bitbucket.org/yt_analysis/yt-3.0/changeset/82badbfa3927/
changeset:   82badbfa3927
branch:      yt-3.0
user:        MatthewTurk
date:        2012-11-19 14:24:38
summary:     Changing install_script to install yt 3.0 into yt-3.0-hg
affected #:  1 file

diff -r 73d07ed5ec643a97f2b8c2b40c6b9514bc8b7304 -r 82badbfa39279f2a688c213a11491f4c54e7ca19 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -613,19 +613,19 @@
     elif [ -e $ORIG_PWD/../yt/mods.py ]
     then
         YT_DIR=`dirname $ORIG_PWD`
-    elif [ ! -e yt-hg ] 
+    elif [ ! -e yt-3.0-hg ] 
     then
-        YT_DIR="$PWD/yt-hg/"
+        YT_DIR="$PWD/yt-3.0-hg/"
         ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt-supplemental/ 2>&1 ) 1>> ${LOG_FILE}
         # Recently the hg server has had some issues with timeouts.  In lieu of
         # a new webserver, we are now moving to a three-stage process.
         # First we clone the repo, but only up to r0.
-        ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt/ ./yt-hg 2>&1 ) 1>> ${LOG_FILE}
+        ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt-3.0/ ./yt-3.0-hg 2>&1 ) 1>> ${LOG_FILE}
         # Now we update to the branch we're interested in.
         ( ${HG_EXEC} -R ${YT_DIR} up -C ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
-    elif [ -e yt-hg ] 
+    elif [ -e yt-3.0-hg ] 
     then
-        YT_DIR="$PWD/yt-hg/"
+        YT_DIR="$PWD/yt-3.0-hg/"
     fi
     echo Setting YT_DIR=${YT_DIR}
 fi
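
As a side note, the updated YT_DIR resolution can be summarized in a short
Python sketch (the paths, repository URL, and branch name mirror the shell
logic above; this is illustrative, not part of the commit):

    import os
    import subprocess

    def resolve_yt_dir(orig_pwd, branch="yt-3.0"):
        # Prefer an existing source checkout next to the caller.
        if os.path.exists(os.path.join(orig_pwd, "..", "yt", "mods.py")):
            return os.path.dirname(orig_pwd)
        # Otherwise clone yt-3.0 into yt-3.0-hg and update to the branch.
        yt_dir = os.path.join(os.getcwd(), "yt-3.0-hg")
        if not os.path.exists(yt_dir):
            subprocess.check_call(["hg", "clone",
                                   "https://bitbucket.org/yt_analysis/yt-3.0/",
                                   yt_dir])
            subprocess.check_call(["hg", "-R", yt_dir, "up", "-C", branch])
        return yt_dir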

Repository URL: https://bitbucket.org/yt_analysis/yt-3.0/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


