[yt-svn] commit/yt: 103 new changesets

Bitbucket commits-noreply@bitbucket.org
Fri Jun 15 15:54:57 PDT 2012


103 new commits in yt:


https://bitbucket.org/yt_analysis/yt/changeset/73a023c40f7b/
changeset:   73a023c40f7b
branch:      geometry_handling
user:        MatthewTurk
date:        2011-11-09 18:47:29
summary:     Merging from the dev tip to geometry handling
affected #:  283 files
Diff too large to display.

https://bitbucket.org/yt_analysis/yt/changeset/244b8f82e4cc/
changeset:   244b8f82e4cc
branch:      geometry_handling
user:        MatthewTurk
date:        2011-11-12 17:18:45
summary:     Merging from yt branch
affected #:  74 files

diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b doc/coding_styleguide.txt
--- a/doc/coding_styleguide.txt
+++ b/doc/coding_styleguide.txt
@@ -21,6 +21,9 @@
    be "na.multiply(a, 3, a)".
  * In general, avoid all double-underscore method names: __something is usually
    unnecessary.
+ * When writing a subclass, use the super built-in to access the super class,
+   rather than explicitly. Ex: "super(SpecialGrid, self).__init__()" rather than
+   "SpecialGrid.__init__()".
  * Doc strings should describe input, output, behavior, and any state changes
    that occur on an object.  See the file `doc/docstring_example.txt` for a
    fiducial example of a docstring.
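
A minimal sketch of the new super() guideline, assuming a hypothetical Grid
base class (SpecialGrid is the subclass name used in the styleguide entry
above):

    class Grid(object):
        def __init__(self, grid_id):
            self.id = grid_id

    class SpecialGrid(Grid):
        def __init__(self, grid_id, level):
            # Preferred: reach the parent initializer through the super built-in
            super(SpecialGrid, self).__init__(grid_id)
            # Discouraged: calling Grid.__init__(self, grid_id) explicitly
            self.level = level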


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b tests/object_field_values.py
--- a/tests/object_field_values.py
+++ b/tests/object_field_values.py
@@ -73,12 +73,42 @@
         YTStaticOutputTest.setup(self)
         known_objects[self.object_name](self)
 
+class YTExtractIsocontoursTest(YTFieldValuesTest):
+    def run(self):
+        val = self.data_object.quantities["WeightedAverageQuantity"](
+            "Density", "Density")
+        rset = self.data_object.extract_isocontours("Density",
+            val, rescale = False, sample_values = "Temperature")
+        self.result = rset
+
+    def compare(self, old_result):
+        if self.result[0].size == 0 and old_result[0].size == 0:
+            return True
+        self.compare_array_delta(self.result[0].ravel(),
+                                 old_result[0].ravel(), 1e-7)
+        self.compare_array_delta(self.result[1], old_result[1], 1e-7)
+
+class YTIsocontourFluxTest(YTFieldValuesTest):
+    def run(self):
+        val = self.data_object.quantities["WeightedAverageQuantity"](
+            "Density", "Density")
+        flux = self.data_object.calculate_isocontour_flux(
+           "Density", val, "x-velocity", "y-velocity", "z-velocity")
+        self.result = flux
+
+    def compare(self, old_result):
+        self.compare_value_delta(self.result, old_result, 1e-7)
+
 for object_name in known_objects:
     for field in field_list + particle_field_list:
         if "cut_region" in object_name and field in particle_field_list:
             continue
         create_test(YTFieldValuesTest, "%s_%s" % (object_name, field),
                     field = field, object_name = object_name)
+    create_test(YTExtractIsocontoursTest, "%s" % (object_name),
+                object_name = object_name)
+    create_test(YTIsocontourFluxTest, "%s" % (object_name),
+                object_name = object_name)
     
 class YTDerivedQuantityTest(YTStaticOutputTest):
     def setup(self):
@@ -140,4 +170,3 @@
                     "%s_%s" % (object_name, field),
                     field_name = field, 
                     object_name = object_name)
-
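
For reference, a minimal usage sketch of the calls these new answer tests
exercise, assuming the DD0010/moving7_0010 dataset that tests/runall.py loads
by default:

    from yt.mods import *

    pf = load("DD0010/moving7_0010")
    dd = pf.h.all_data()

    # Isocontour value: density-weighted average density, as in the tests above
    val = dd.quantities["WeightedAverageQuantity"]("Density", "Density")

    # Triangle vertices plus per-triangle Temperature samples on the contour
    verts, samples = dd.extract_isocontours("Density", val, rescale=False,
                                            sample_values="Temperature")

    # Flux of Density through the same isocontour, using the velocity components
    flux = dd.calculate_isocontour_flux("Density", val,
                                        "x-velocity", "y-velocity", "z-velocity")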


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b tests/runall.py
--- a/tests/runall.py
+++ b/tests/runall.py
@@ -1,7 +1,7 @@
 import matplotlib; matplotlib.use('Agg')
 from yt.config import ytcfg
-ytcfg["yt","loglevel"] = "50"
-ytcfg["yt","serialize"] = "False"
+ytcfg["yt", "loglevel"] = "50"
+ytcfg["yt", "serialize"] = "False"
 
 from yt.utilities.answer_testing.api import \
     RegressionTestRunner, clear_registry, create_test, \
@@ -58,23 +58,23 @@
         my_hash = "UNKNOWN%s" % (time.time())
     parser = optparse.OptionParser()
     parser.add_option("-f", "--parameter-file", dest="parameter_file",
-                      default = os.path.join(cwd, "DD0010/moving7_0010"),
-                      help = "The parameter file value to feed to 'load' to test against",
-                      )
+                      default=os.path.join(cwd, "DD0010/moving7_0010"),
+                      help="The parameter file value to feed to 'load' to test against")
     parser.add_option("-l", "--list", dest="list_tests", action="store_true",
-                      default = False, help = "List all tests and then exit")
+                      default=False, help="List all tests and then exit")
     parser.add_option("-t", "--tests", dest="test_pattern", default="*",
-                      help = "The test name pattern to match.  Can include wildcards.")
+                      help="The test name pattern to match.  Can include wildcards.")
     parser.add_option("-o", "--output", dest="storage_dir",
                       default=test_storage_directory,
-                      help = "Base directory for storing test output.")
+                      help="Base directory for storing test output.")
     parser.add_option("-c", "--compare", dest="compare_name",
                       default=None,
-                      help = "The name against which we will compare")
+                      help="The name against which we will compare")
     parser.add_option("-n", "--name", dest="this_name",
                       default=my_hash,
-                      help = "The name we'll call this set of tests")
+                      help="The name we'll call this set of tests")
     opts, args = parser.parse_args()
+
     if opts.list_tests:
         tests_to_run = []
         for m, vals in mapping.items():
@@ -86,10 +86,13 @@
         tests = list(set(tests_to_run))
         print "\n    ".join(tests)
         sys.exit(0)
+
+    # Load the test pf and make sure it's good.
     pf = load(opts.parameter_file)
     if pf is None:
         print "Couldn't load the specified parameter file."
         sys.exit(1)
+
     # Now we modify our compare name and self name to include the pf.
     compare_id = opts.compare_name
     watcher = None
@@ -97,14 +100,17 @@
         compare_id += "_%s_%s" % (pf, pf._hash())
         watcher = Xunit()
     this_id = opts.this_name + "_%s_%s" % (pf, pf._hash())
+
     rtr = RegressionTestRunner(this_id, compare_id,
-            results_path = opts.storage_dir,
-            compare_results_path = opts.storage_dir,
-            io_log = [opts.parameter_file])
+                               results_path=opts.storage_dir,
+                               compare_results_path=opts.storage_dir,
+                               io_log=[opts.parameter_file])
+
     rtr.watcher = watcher
     tests_to_run = []
     for m, vals in mapping.items():
         new_tests = fnmatch.filter(vals, opts.test_pattern)
+
         if len(new_tests) == 0: continue
         load_tests(m, cwd)
         keys = set(registry_entries())




diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -42,7 +42,7 @@
     __global_parallel_size = '1',
     __topcomm_parallel_rank = '0',
     __topcomm_parallel_size = '1',
-    storeparameterfiles = 'True',
+    storeparameterfiles = 'False',
     parameterfilestore = 'parameter_files.csv',
     maximumstoredpfs = '500',
     loadfieldplugins = 'True',
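
Since storeparameterfiles now defaults to 'False', a script that relies on the
parameter file store has to opt back in explicitly; a minimal sketch using the
ytcfg interface shown in tests/runall.py above:

    from yt.config import ytcfg
    ytcfg["yt", "storeparameterfiles"] = "True"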


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -68,7 +68,6 @@
 from field_info_container import \
     FieldInfoContainer, \
     FieldInfo, \
-    CodeFieldInfoContainer, \
     NeedsGridType, \
     NeedsOriginalGrid, \
     NeedsDataField, \


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -1599,11 +1599,12 @@
         # It is probably faster, as it consolidates IO, but if we did it in
         # _project_level, then it would be more memory conservative
         if self.preload_style == 'all':
+            dependencies = self.get_dependencies(fields, ghost_zones = False)
             print "Preloading %s grids and getting %s" % (
                     len(self.source._get_grid_objs()),
-                    self.get_dependencies(fields))
+                    dependencies)
             self.comm.preload([g for g in self._get_grid_objs()],
-                          self.get_dependencies(fields), self.hierarchy.io)
+                          dependencies, self.hierarchy.io)
         # By changing the remove-from-tree method to accumulate, we can avoid
         # having to do this by level, and instead do it by CPU file
         for level in range(0, self._max_level+1):
@@ -2233,7 +2234,7 @@
         return dls
 
     def _get_data_from_grid(self, grid, fields, dls):
-        g_fields = [grid[field] for field in fields]
+        g_fields = [grid[field].astype("float64") for field in fields]
         c_fields = [self[field] for field in fields]
         ref_ratio = self.pf.refine_by**(self.level - grid.Level)
         FillBuffer(ref_ratio,
@@ -2243,7 +2244,7 @@
             grid.child_mask, self.domain_width, dls[grid.Level],
             self.axis)
 
-class AMR3DData(AMRData, GridPropertiesMixin):
+class AMR3DData(AMRData, GridPropertiesMixin, ParallelAnalysisInterface):
     _key_fields = ['x','y','z','dx','dy','dz']
     """
     Class describing a cluster of data points, not necessarily sharing any
@@ -2257,6 +2258,7 @@
         used as a base class.  Note that *center* is supplied, but only used
         for fields and quantities that require it.
         """
+        ParallelAnalysisInterface.__init__(self)
         AMRData.__init__(self, pf, fields, **kwargs)
         self._set_center(center)
         self.coords = None
@@ -2341,11 +2343,14 @@
             f = grid[field]
             return na.array([f[i,:][pointI] for i in range(3)])
         else:
+            tr = grid[field]
+            if tr.size == 1: # dx, dy, dz, cellvolume
+                tr = tr * na.ones(grid.ActiveDimensions, dtype='float64')
+            if len(grid.Children) == 0 and grid.OverlappingSiblings is None \
+                and self._is_fully_enclosed(grid):
+                return tr.ravel()
             pointI = self._get_point_indices(grid)
-            if grid[field].size == 1: # dx, dy, dz, cellvolume
-                t = grid[field] * na.ones(grid.ActiveDimensions, dtype='float64')
-                return t[pointI].ravel()
-            return grid[field][pointI].ravel()
+            return tr[pointI].ravel()
 
     def _flush_data_to_grids(self, field, default_val, dtype='float32'):
         """
@@ -2456,12 +2461,19 @@
             format.  Suitable for loading into meshlab.
         rescale : bool, optional
             If true, the vertices will be rescaled within their min/max.
+        sample_values : string, optional
+            Any field whose value should be extracted at the center of each
+            triangle.
 
         Returns
         -------
         verts : array of floats
             The array of vertices, x,y,z.  Taken in threes, these are the
             triangle vertices.
+        samples : array of floats
+            If `sample_values` is specified, this will be returned and will
+            contain the values of the field specified at the center of each
+            triangle.
 
         References
         ----------
@@ -2481,9 +2493,7 @@
         """
         verts = []
         samples = []
-        pb = get_pbar("Extracting Isocontours", len(self._grids))
-        for i, g in enumerate(self._grids):
-            pb.update(i)
+        for i, g in enumerate(self._get_grid_objs()):
             mask = self._get_cut_mask(g) * g.child_mask
             vals = g.get_vertex_centered_data(field)
             if sample_values is not None:
@@ -2496,20 +2506,24 @@
                 my_verts, svals = my_verts
                 samples.append(svals)
             verts.append(my_verts)
-        pb.finish()
-        verts = na.concatenate(verts)
+        verts = na.concatenate(verts).transpose()
+        verts = self.comm.par_combine_object(verts, op='cat', datatype='array')
+        verts = verts.transpose()
         if sample_values is not None:
             samples = na.concatenate(samples)
+            samples = self.comm.par_combine_object(samples, op='cat',
+                                datatype='array')
         if rescale:
             mi = na.min(verts, axis=0)
             ma = na.max(verts, axis=0)
             verts = (verts - mi) / (ma - mi).max()
-        if filename is not None:
+        if filename is not None and self.comm.rank == 0:
             f = open(filename, "w")
             for v1 in verts:
                 f.write("v %0.16e %0.16e %0.16e\n" % (v1[0], v1[1], v1[2]))
             for i in range(len(verts)/3):
                 f.write("f %s %s %s\n" % (i*3+1, i*3+2, i*3+3))
+            f.close()
         if sample_values is not None:
             return verts, samples
         return verts
@@ -2579,7 +2593,7 @@
         ...     "x-velocity", "y-velocity", "z-velocity", "Metal_Density")
         """
         flux = 0.0
-        for g in self._grids:
+        for g in self._get_grid_objs():
             mask = self._get_cut_mask(g) * g.child_mask
             vals = g.get_vertex_centered_data(field)
             if fluxing_field is None:
@@ -2590,6 +2604,7 @@
                          [field_x, field_y, field_z]]
             flux += march_cubes_grid_flux(value, vals, xv, yv, zv,
                         ff, mask, g.LeftEdge, g.dds)
+        flux = self.comm.mpi_allreduce(flux, op="sum")
         return flux
 
     def extract_connected_sets(self, field, num_levels, min_val, max_val,
@@ -3235,7 +3250,7 @@
     def _get_data_from_grid(self, grid, fields):
         ll = int(grid.Level == self.level)
         ref_ratio = self.pf.refine_by**(self.level - grid.Level)
-        g_fields = [grid[field] for field in fields]
+        g_fields = [grid[field].astype("float64") for field in fields]
         c_fields = [self[field] for field in fields]
         count = FillRegion(ref_ratio,
             grid.get_global_startindex(), self.global_startindex,
@@ -3292,39 +3307,36 @@
             fields_to_get = self.fields[:]
         else:
             fields_to_get = ensure_list(field)
-        for field in fields_to_get:
-            grid_count = 0
-            if self.field_data.has_key(field):
-                continue
-            mylog.debug("Getting field %s from %s possible grids",
-                       field, len(self._grids))
-            if self._use_pbar: pbar = \
-                    get_pbar('Searching grids for values ', len(self._grids))
-            # Note that, thanks to some trickery, we have different dimensions
-            # on the field than one might think from looking at the dx and the
-            # L/R edges.
-            # We jump-start our task here
-            self._update_level_state(0, field)
-            
-            # The grids are assumed to be pre-sorted
-            last_level = 0
-            for gi, grid in enumerate(self._grids):
-                if self._use_pbar: pbar.update(gi)
-                if grid.Level > last_level and grid.Level <= self.level:
-                    self._update_level_state(last_level + 1)
-                    self._refine(1, field)
-                    last_level = grid.Level
-                self._get_data_from_grid(grid, field)
-            if self.level > 0:
+        fields_to_get = [f for f in fields_to_get if f not in self.field_data]
+        # Note that, thanks to some trickery, we have different dimensions
+        # on the field than one might think from looking at the dx and the
+        # L/R edges.
+        # We jump-start our task here
+        mylog.debug("Getting fields %s from %s possible grids",
+                   fields_to_get, len(self._grids))
+        self._update_level_state(0, fields_to_get)
+        if self._use_pbar: pbar = \
+                get_pbar('Searching grids for values ', len(self._grids))
+        # The grids are assumed to be pre-sorted
+        last_level = 0
+        for gi, grid in enumerate(self._grids):
+            if self._use_pbar: pbar.update(gi)
+            if grid.Level > last_level and grid.Level <= self.level:
+                self._update_level_state(last_level + 1)
+                self._refine(1, fields_to_get)
+                last_level = grid.Level
+            self._get_data_from_grid(grid, fields_to_get)
+        if self.level > 0:
+            for field in fields_to_get:
                 self[field] = self[field][1:-1,1:-1,1:-1]
-            if na.any(self[field] == -999):
-                # and self.dx < self.hierarchy.grids[0].dx:
-                n_bad = na.where(self[field]==-999)[0].size
-                mylog.error("Covering problem: %s cells are uncovered", n_bad)
-                raise KeyError(n_bad)
-            if self._use_pbar: pbar.finish()
-
-    def _update_level_state(self, level, field = None):
+                if na.any(self[field] == -999):
+                    # and self.dx < self.hierarchy.grids[0].dx:
+                    n_bad = (self[field]==-999).sum()
+                    mylog.error("Covering problem: %s cells are uncovered", n_bad)
+                    raise KeyError(n_bad)
+        if self._use_pbar: pbar.finish()
+
+    def _update_level_state(self, level, fields = None):
         dx = self._base_dx / self.pf.refine_by**level
         self.field_data['cdx'] = dx[0]
         self.field_data['cdy'] = dx[1]
@@ -3337,16 +3349,20 @@
         if level == 0 and self.level > 0:
             # We use one grid cell at LEAST, plus one buffer on all sides
             idims = na.rint((self.right_edge-self.left_edge)/dx).astype('int64') + 2
-            self.field_data[field] = na.zeros(idims,dtype='float64')-999
+            fields = ensure_list(fields)
+            for field in fields:
+                self.field_data[field] = na.zeros(idims,dtype='float64')-999
             self._cur_dims = idims.astype("int32")
         elif level == 0 and self.level == 0:
             DLE = self.pf.domain_left_edge
             self.global_startindex = na.array(na.floor(LL/ dx), dtype='int64')
             idims = na.rint((self.right_edge-self.left_edge)/dx).astype('int64')
-            self.field_data[field] = na.zeros(idims,dtype='float64')-999
+            fields = ensure_list(fields)
+            for field in fields:
+                self.field_data[field] = na.zeros(idims,dtype='float64')-999
             self._cur_dims = idims.astype("int32")
 
-    def _refine(self, dlevel, field):
+    def _refine(self, dlevel, fields):
         rf = float(self.pf.refine_by**dlevel)
 
         input_left = (self._old_global_startindex + 0.5) * rf 
@@ -3355,16 +3371,17 @@
 
         self._cur_dims = output_dims
 
-        output_field = na.zeros(output_dims, dtype="float64")
-        output_left = self.global_startindex + 0.5
-        ghost_zone_interpolate(rf, self[field], input_left,
-                               output_field, output_left)
-        self[field] = output_field
+        for field in fields:
+            output_field = na.zeros(output_dims, dtype="float64")
+            output_left = self.global_startindex + 0.5
+            ghost_zone_interpolate(rf, self[field], input_left,
+                                   output_field, output_left)
+            self.field_data[field] = output_field
 
     def _get_data_from_grid(self, grid, fields):
         fields = ensure_list(fields)
-        g_fields = [grid[field] for field in fields]
-        c_fields = [self[field] for field in fields]
+        g_fields = [grid[field].astype("float64") for field in fields]
+        c_fields = [self.field_data[field] for field in fields]
         count = FillRegion(1,
             grid.get_global_startindex(), self.global_startindex,
             c_fields, g_fields, 
@@ -3381,7 +3398,7 @@
     existing regions.
     """
     _type_name = "boolean"
-    _con_args = {"regions"}
+    _con_args = ("regions")
     def __init__(self, regions, fields = None, pf = None, **kwargs):
         """
         This will build a hybrid region based on the boolean logic
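
The isocontour methods above now iterate over the locally assigned grids and
combine their results across processors, with only the root processor writing
any requested output file.  A minimal sketch of that file-writing path,
assuming a loaded parameter file pf and a hypothetical output name contour.obj:

    dd = pf.h.all_data()
    val = dd.quantities["WeightedAverageQuantity"]("Density", "Density")
    # Vertices are gathered with par_combine_object; rank 0 writes the OBJ file.
    dd.extract_isocontours("Density", val, filename="contour.obj", rescale=False)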


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -34,37 +34,17 @@
 
 from yt.funcs import *
 
-class FieldInfoContainer(object): # We are all Borg.
+class FieldInfoContainer(dict): # Resistance has utility
     """
     This is a generic field container.  It contains a list of potential derived
     fields, all of which know how to act on a data object and return a value.
     This object handles converting units as well as validating the availability
     of a given field.
+
     """
-    _shared_state = {}
-    _universal_field_list = {}
-    def __new__(cls, *args, **kwargs):
-        self = object.__new__(cls, *args, **kwargs)
-        self.__dict__ = cls._shared_state
-        return self
-    def __getitem__(self, key):
-        if key in self._universal_field_list:
-            return self._universal_field_list[key]
-        raise KeyError
-    def keys(self):
-        """ Return all the field names this object knows about. """
-        return self._universal_field_list.keys()
+    fallback = None
 
-    def __iter__(self):
-        return self._universal_field_list.iterkeys()
-
-    def __setitem__(self, key, val):
-        self._universal_field_list[key] = val
-
-    def has_key(self, key):
-        return key in self._universal_field_list
-
-    def add_field(self, name, function = None, **kwargs):
+    def add_field(self, name, function=None, **kwargs):
         """
         Add a new field, along with supplemental metadata, to the list of
         available fields.  This respects a number of arguments, all of which
@@ -79,6 +59,41 @@
             return create_function
         self[name] = DerivedField(name, function, **kwargs)
 
+    def has_key(self, key):
+        # This gets used a lot
+        if key in self: return True
+        if self.fallback is None: return False
+        return self.fallback.has_key(key)
+
+    def __missing__(self, key):
+        if self.fallback is None:
+            raise KeyError("No field named %s" % key)
+        return self.fallback[key]
+
+    @classmethod
+    def create_with_fallback(cls, fallback):
+        obj = cls()
+        obj.fallback = fallback
+        return obj
+
+    def __contains__(self, key):
+        if dict.__contains__(self, key): return True
+        if self.fallback is None: return False
+        return self.fallback.has_key(key)
+
+    def __iter__(self):
+        for f in dict.__iter__(self): yield f
+        if self.fallback:
+            for f in self.fallback: yield f
+
+def TranslationFunc(field_name):
+    def _TranslationFunc(field, data):
+        return data[field_name]
+    return _TranslationFunc
+
+def NullFunc(field, data):
+    return
+
 FieldInfo = FieldInfoContainer()
 add_field = FieldInfo.add_field
 
@@ -91,28 +106,6 @@
         return function
     return inner_decorator
 
-class CodeFieldInfoContainer(FieldInfoContainer):
-    def __setitem__(self, key, val):
-        self._field_list[key] = val
-
-    def __iter__(self):
-        return itertools.chain(self._field_list.iterkeys(),
-                               self._universal_field_list.iterkeys())
-
-    def keys(self):
-        return set(self._field_list.keys() + self._universal_field_list.keys())
-
-    def has_key(self, key):
-        return key in self._universal_field_list \
-            or key in self._field_list
-
-    def __getitem__(self, key):
-        if key in self._field_list:
-            return self._field_list[key]
-        if key in self._universal_field_list:
-            return self._universal_field_list[key]
-        raise KeyError(key)
-
 class ValidationException(Exception):
     pass
 
@@ -120,7 +113,6 @@
     def __init__(self, ghost_zones = 0, fields=None):
         self.ghost_zones = ghost_zones
         self.fields = fields
-
     def __str__(self):
         return "(%s, %s)" % (self.ghost_zones, self.fields)
 
@@ -131,21 +123,18 @@
 class NeedsDataField(ValidationException):
     def __init__(self, missing_fields):
         self.missing_fields = missing_fields
-
     def __str__(self):
         return "(%s)" % (self.missing_fields)
 
 class NeedsProperty(ValidationException):
     def __init__(self, missing_properties):
         self.missing_properties = missing_properties
-
     def __str__(self):
         return "(%s)" % (self.missing_properties)
 
 class NeedsParameter(ValidationException):
     def __init__(self, missing_parameters):
         self.missing_parameters = missing_parameters
-
     def __str__(self):
         return "(%s)" % (self.missing_parameters)
 
@@ -159,14 +148,16 @@
         self.nd = nd
         self.flat = flat
         self._spatial = not flat
-        self.ActiveDimensions = [nd, nd, nd]
+        self.ActiveDimensions = [nd,nd,nd]
         self.LeftEdge = [0.0, 0.0, 0.0]
         self.RightEdge = [1.0, 1.0, 1.0]
         self.dds = na.ones(3, "float64")
         self['dx'] = self['dy'] = self['dz'] = na.array([1.0])
         class fake_parameter_file(defaultdict):
             pass
-        if pf is None:  # setup defaults
+
+        if pf is None:
+            # required attrs
             pf = fake_parameter_file(lambda: 1)
             pf.current_redshift = pf.omega_lambda = pf.omega_matter = \
                 pf.hubble_constant = pf.cosmological_simulation = 0.0
@@ -174,6 +165,7 @@
             pf.domain_right_edge = na.ones(3, 'float64')
             pf.dimensionality = 3
         self.pf = pf
+
         class fake_hierarchy(object):
             class fake_io(object):
                 def _read_data_set(io_self, data, field):
@@ -194,47 +186,42 @@
             defaultdict.__init__(self, 
                 lambda: na.ones((nd * nd * nd), dtype='float64')
                 + 1e-4*na.random.random((nd * nd * nd)))
+
     def __missing__(self, item):
         FI = getattr(self.pf, "field_info", FieldInfo)
-        if FI.has_key(item) and \
-            FI[item]._function.func_name != '<lambda>':
+        if FI.has_key(item) and FI[item]._function.func_name != 'NullFunc':
             try:
                 vv = FI[item](self)
             except NeedsGridType as exc:
                 ngz = exc.ghost_zones
-                nfd = FieldDetector(self.nd+ngz*2)
+                nfd = FieldDetector(self.nd + ngz * 2)
                 nfd._num_ghost_zones = ngz
                 vv = FI[item](nfd)
                 if ngz > 0: vv = vv[ngz:-ngz, ngz:-ngz, ngz:-ngz]
-
                 for i in nfd.requested:
                     if i not in self.requested: self.requested.append(i)
-
                 for i in nfd.requested_parameters:
                     if i not in self.requested_parameters:
                         self.requested_parameters.append(i)
-
             if vv is not None:
                 if not self.flat: self[item] = vv
                 else: self[item] = vv.ravel()
                 return self[item]
-
         self.requested.append(item)
         return defaultdict.__missing__(self, item)
 
     def _read_data(self, field_name):
         self.requested.append(field_name)
         FI = getattr(self.pf, "field_info", FieldInfo)
-        if FI.has_key(field_name) and \
-           FI[field_name].particle_type:
+        if FI.has_key(field_name) and FI[field_name].particle_type:
             self.requested.append(field_name)
             return na.ones(self.NumberOfParticles)
         return defaultdict.__missing__(self, field_name)
 
     def get_field_parameter(self, param):
         self.requested_parameters.append(param)
-        if param in ['bulk_velocity','center','height_vector']:
-            return na.random.random(3)*1e-2
+        if param in ['bulk_velocity', 'center', 'height_vector']:
+            return na.random.random(3) * 1e-2
         else:
             return 0.0
     _num_ghost_zones = 0
@@ -258,40 +245,35 @@
         :param function: is a function handle that defines the field
         :param convert_function: must convert to CGS, if it needs to be done
         :param units: is a mathtext-formatted string that describes the field
-        :param projected_units: if we display a projection, what should the units be?
+        :param projected_units: if we display a projection, what should the
+                                units be?
         :param take_log: describes whether the field should be logged
         :param validators: is a list of :class:`FieldValidator` objects
         :param particle_type: is this field based on particles?
         :param vector_field: describes the dimensionality of the field
         :param display_field: governs its appearance in the dropdowns in reason
-        :param not_in_all: is used for baryon fields from the data that are not in
-                           all the grids
+        :param not_in_all: is used for baryon fields from the data that are not
+                           in all the grids
         :param display_name: a name used in the plots
         :param projection_conversion: which unit should we multiply by in a
                                       projection?
-
         """
         self.name = name
         self._function = function
-
         if validators:
             self.validators = ensure_list(validators)
         else:
             self.validators = []
-
         self.take_log = take_log
         self._units = units
         self._projected_units = projected_units
-
         if not convert_function:
             convert_function = lambda a: 1.0
         self._convert_function = convert_function
         self._particle_convert_function = particle_convert_function
-
         self.particle_type = particle_type
         self.vector_field = vector_field
         self.projection_conversion = projection_conversion
-
         self.display_field = display_field
         self.display_name = display_name
         self.not_in_all = not_in_all
@@ -300,7 +282,6 @@
         """
         This raises an exception of the appropriate type if the set of
         validation mechanisms are not met, and otherwise returns True.
-
         """
         for validator in self.validators:
             validator(data)
@@ -310,7 +291,6 @@
     def get_dependencies(self, *args, **kwargs):
         """
         This returns a list of names of fields that this field depends on.
-
         """
         e = FieldDetector(*args, **kwargs)
         if self._function.func_name == '<lambda>':
@@ -320,50 +300,43 @@
         return e
 
     def get_units(self):
-        """ Return a string describing the units.  """
+        """ Return a string describing the units. """
         return self._units
 
     def get_projected_units(self):
         """
         Return a string describing the units if the field has been projected.
-
         """
         return self._projected_units
 
     def __call__(self, data):
-        """ Return the value of the field in a given *data* object.  """
+        """ Return the value of the field in a given *data* object. """
         ii = self.check_available(data)
         original_fields = data.keys() # Copy
         dd = self._function(self, data)
         dd *= self._convert_function(data)
-
         for field_name in data.keys():
             if field_name not in original_fields:
                 del data[field_name]
-
         return dd
 
     def get_source(self):
         """
         Return a string containing the source of the function (if possible.)
-
         """
         return inspect.getsource(self._function)
 
     def get_label(self, projected=False):
         """
         Return a data label for the given field, inluding units.
-
         """
         name = self.name
         if self.display_name is not None: name = self.display_name
         data_label = r"$\rm{%s}" % name
-
         if projected: units = self.get_projected_units()
         else: units = self.get_units()
         if units != "": data_label += r"\/\/ (%s)" % (units)
         data_label += r"$"
-
         return data_label
 
     def particle_convert(self, data):
@@ -378,11 +351,9 @@
     def __init__(self, parameters):
         """
         This validator ensures that the parameter file has a given parameter.
-
         """
         FieldValidator.__init__(self)
         self.parameters = ensure_list(parameters)
-
     def __call__(self, data):
         doesnt_have = []
         for p in self.parameters:
@@ -395,13 +366,11 @@
 class ValidateDataField(FieldValidator):
     def __init__(self, field):
         """
-        This validator ensures that the output file has a given data field
-        stored in it.
-
+        This validator ensures that the output file has a given data field stored
+        in it.
         """
         FieldValidator.__init__(self)
         self.fields = ensure_list(field)
-
     def __call__(self, data):
         doesnt_have = []
         if isinstance(data, FieldDetector): return True
@@ -410,19 +379,15 @@
                 doesnt_have.append(f)
         if len(doesnt_have) > 0:
             raise NeedsDataField(doesnt_have)
-
         return True
 
 class ValidateProperty(FieldValidator):
     def __init__(self, prop):
         """
-        This validator ensures that the data object has a given python
-        attribute.
-
+        This validator ensures that the data object has a given python attribute.
         """
         FieldValidator.__init__(self)
         self.prop = ensure_list(prop)
-
     def __call__(self, data):
         doesnt_have = []
         for p in self.prop:
@@ -430,7 +395,6 @@
                 doesnt_have.append(p)
         if len(doesnt_have) > 0:
             raise NeedsProperty(doesnt_have)
-
         return True
 
 class ValidateSpatial(FieldValidator):
@@ -438,15 +402,13 @@
         """
         This validator ensures that the data handed to the field is of spatial
         nature -- that is to say, 3-D.
-
         """
         FieldValidator.__init__(self)
         self.ghost_zones = ghost_zones
         self.fields = fields
-
     def __call__(self, data):
-        # When we say spatial information, we really mean that it has a
-        # three-dimensional data structure
+        # When we say spatial information, we really mean
+        # that it has a three-dimensional data structure
         #if isinstance(data, FieldDetector): return True
         if not data._spatial:
             raise NeedsGridType(self.ghost_zones,self.fields)
@@ -459,10 +421,8 @@
         """
         This validator ensures that the data handed to the field is an actual
         grid patch, not a covering grid of any kind.
-
         """
         FieldValidator.__init__(self)
-
     def __call__(self, data):
         # We need to make sure that it's an actual AMR grid
         if isinstance(data, FieldDetector): return True
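
With FieldInfoContainer now subclassing dict and supporting a fallback, a
frontend-style container can be layered on top of the universal FieldInfo.  A
minimal sketch, assuming a hypothetical frontend whose on-disk density field is
named "Dens":

    from yt.data_objects.field_info_container import \
        FieldInfoContainer, FieldInfo, NullFunc, TranslationFunc

    MyCodeFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
    add_mycode_field = MyCodeFieldInfo.add_field

    # A field read straight from disk gets the NullFunc placeholder...
    add_mycode_field("Dens", function=NullFunc, take_log=True,
                     units=r"\rm{g}/\rm{cm}^{3}")
    # ...while the yt-standard name can simply translate to the on-disk one.
    add_mycode_field("Density", function=TranslationFunc("Dens"),
                     take_log=True, units=r"\rm{g}/\rm{cm}^{3}")

    # Anything not defined here falls through to the universal FieldInfo via
    # __missing__/__contains__, so yt-wide fields such as "SoundSpeed" (added
    # in universal_fields.py below) remain visible once they are registered.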


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -3,7 +3,7 @@
 
 Author: Matthew Turk <matthewturk at gmail.com>
 Affiliation: KIPAC/SLAC/Stanford
-Homepage: http://yt-project.org/
+Homepage: http://yt.enzotools.org/
 License:
   Copyright (C) 2007-2011 Matthew Turk.  All Rights Reserved.
 
@@ -25,10 +25,12 @@
 
 import exceptions
 import pdb
-import numpy as na
 import weakref
 
+import numpy as na
+
 from yt.funcs import *
+from yt.utilities.definitions import x_dict, y_dict
 
 from yt.data_objects.data_containers import YTFieldData
 from yt.utilities.definitions import x_dict, y_dict
@@ -75,20 +77,21 @@
         if self.start_index is not None:
             return self.start_index
         if self.Parent == None:
-            iLE = self.LeftEdge - self.pf.domain_left_edge
-            start_index = iLE / self.dds
+            left = self.LeftEdge - self.pf.domain_left_edge
+            start_index = left / self.dds
             return na.rint(start_index).astype('int64').ravel()
+
         pdx = self.Parent.dds
         start_index = (self.Parent.get_global_startindex()) + \
-                       na.rint((self.LeftEdge - self.Parent.LeftEdge)/pdx)
-        self.start_index = (start_index*self.pf.refine_by).astype('int64').ravel()
+                       na.rint((self.LeftEdge - self.Parent.LeftEdge) / pdx)
+        self.start_index = (start_index * self.pf.refine_by).astype('int64').ravel()
         return self.start_index
 
-
     def get_field_parameter(self, name, default=None):
         """
-        This is typically only used by derived field functions, but
-        it returns parameters used to generate fields.
+        This is typically only used by derived field functions, but it returns
+        parameters used to generate fields.
+
         """
         if self.field_parameters.has_key(name):
             return self.field_parameters[name]
@@ -99,19 +102,19 @@
         """
         Here we set up dictionaries that get passed up and down and ultimately
         to derived fields.
+
         """
         self.field_parameters[name] = val
 
     def has_field_parameter(self, name):
-        """
-        Checks if a field parameter is set.
-        """
+        """ Checks if a field parameter is set. """
         return self.field_parameters.has_key(name)
 
     def convert(self, datatype):
         """
-        This will attempt to convert a given unit to cgs from code units.
-        It either returns the multiplicative factor or throws a KeyError.
+        This will attempt to convert a given unit to cgs from code units. It
+        either returns the multiplicative factor or throws a KeyError.
+
         """
         return self.pf[datatype]
 
@@ -119,7 +122,7 @@
         # We'll do this the slow way to be clear what's going on
         s = "%s (%s): " % (self.__class__.__name__, self.pf)
         s += ", ".join(["%s=%s" % (i, getattr(self,i))
-                       for i in self._con_args])
+                        for i in self._con_args])
         return s
 
     def _generate_field(self, field):
@@ -133,7 +136,7 @@
                 f_gz = ngt_exception.fields
                 gz_grid = self.retrieve_ghost_zones(n_gz, f_gz, smoothed=True)
                 temp_array = self.pf.field_info[field](gz_grid)
-                sl = [slice(n_gz,-n_gz)] * 3
+                sl = [slice(n_gz, -n_gz)] * 3
                 self[field] = temp_array[sl]
             else:
                 self[field] = self.pf.field_info[field](self)
@@ -166,14 +169,14 @@
     def keys(self):
         return self.field_data.keys()
 
-    def get_data(self, field):
+    def get_data(self, field, convert = True):
         """
         Returns a field or set of fields for a key or set of keys
         """
         if not self.field_data.has_key(field):
             if field in self.hierarchy.field_list:
                 conv_factor = 1.0
-                if self.pf.field_info.has_key(field):
+                if self.pf.field_info.has_key(field) and convert == True:
                     conv_factor = self.pf.field_info[field]._convert_function(self)
                 if self.pf.field_info[field].particle_type and \
                    self.NumberOfParticles == 0:
@@ -196,14 +199,14 @@
 
     def _setup_dx(self):
         # So first we figure out what the index is.  We don't assume
-        # that dx=dy=dz , at least here.  We probably do elsewhere.
+        # that dx=dy=dz, at least here.  We probably do elsewhere.
         id = self.id - self._id_offset
         if self.Parent is not None:
             self.dds = self.Parent.dds / self.pf.refine_by
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE-LE)/self.ActiveDimensions)
+            self.dds = na.array((RE - LE) / self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
@@ -226,6 +229,7 @@
         Generate a mask that shows which cells overlap with arbitrary arrays
         *LE* and *RE*) of edges, typically grids, along *axis*.
         Use algorithm described at http://www.gamedev.net/reference/articles/article735.asp
+
         """
         x = x_dict[axis]
         y = y_dict[axis]
@@ -243,8 +247,9 @@
 
     def clear_data(self):
         """
-        Clear out the following things: child_mask, child_indices,
-        all fields, all field parameters.
+        Clear out the following things: child_mask, child_indices, all fields,
+        all field parameters.
+
         """
         self._del_child_mask()
         self._del_child_indices()
@@ -255,9 +260,7 @@
         return self._child_mask, self._child_indices
 
     def _prepare_grid(self):
-        """
-        Copies all the appropriate attributes from the hierarchy
-        """
+        """ Copies all the appropriate attributes from the hierarchy. """
         # This is definitely the slowest part of generating the hierarchy
         # Now we give it pointers to all of its attributes
         # Note that to keep in line with Enzo, we have broken PEP-8
@@ -269,33 +272,27 @@
         h.grid_levels[my_ind, 0] = self.Level
         # This might be needed for streaming formats
         #self.Time = h.gridTimes[my_ind,0]
-        self.NumberOfParticles = h.grid_particle_count[my_ind,0]
+        self.NumberOfParticles = h.grid_particle_count[my_ind, 0]
 
     def __len__(self):
         return na.prod(self.ActiveDimensions)
 
     def find_max(self, field):
-        """
-        Returns value, index of maximum value of *field* in this gird
-        """
-        coord1d=(self[field]*self.child_mask).argmax()
-        coord=na.unravel_index(coord1d, self[field].shape)
+        """ Returns value, index of maximum value of *field* in this grid. """
+        coord1d = (self[field] * self.child_mask).argmax()
+        coord = na.unravel_index(coord1d, self[field].shape)
         val = self[field][coord]
         return val, coord
 
     def find_min(self, field):
-        """
-        Returns value, index of minimum value of *field* in this gird
-        """
-        coord1d=(self[field]*self.child_mask).argmin()
-        coord=na.unravel_index(coord1d, self[field].shape)
+        """ Returns value, index of minimum value of *field* in this grid. """
+        coord1d = (self[field] * self.child_mask).argmin()
+        coord = na.unravel_index(coord1d, self[field].shape)
         val = self[field][coord]
         return val, coord
 
     def get_position(self, index):
-        """
-        Returns center position of an *index*
-        """
+        """ Returns center position of an *index*. """
         pos = (index + 0.5) * self.dds + self.LeftEdge
         return pos
 
@@ -303,6 +300,7 @@
         """
         Clears all datafields from memory and calls
         :meth:`clear_derived_quantities`.
+
         """
         for key in self.keys():
             del self.field_data[key]
@@ -313,9 +311,7 @@
         self.clear_derived_quantities()
 
     def clear_derived_quantities(self):
-        """
-        Clears coordinates, child_indices, child_mask.
-        """
+        """ Clears coordinates, child_indices, child_mask. """
         # Access the property raw-values here
         del self.child_mask
         del self.child_ind
@@ -368,10 +364,10 @@
 
     #@time_execution
     def __fill_child_mask(self, child, mask, tofill):
-        rf = self.pf.refine_by**(child.Level - self.Level)
+        rf = self.pf.refine_by
         gi, cgi = self.get_global_startindex(), child.get_global_startindex()
-        startIndex = na.maximum(0, cgi/rf - gi)
-        endIndex = na.minimum( (cgi+child.ActiveDimensions)/rf - gi,
+        startIndex = na.maximum(0, cgi / rf - gi)
+        endIndex = na.minimum((cgi + child.ActiveDimensions) / rf - gi,
                               self.ActiveDimensions)
         endIndex += (startIndex == endIndex)
         mask[startIndex[0]:endIndex[0],
@@ -381,7 +377,8 @@
     def __generate_child_mask(self):
         """
         Generates self.child_mask, which is zero where child grids exist (and
-        thus, where higher resolution data is available.)
+        thus, where higher resolution data is available).
+
         """
         self._child_mask = na.ones(self.ActiveDimensions, 'int32')
         for child in self.Children:
@@ -396,6 +393,7 @@
         """
         Generates self.child_index_mask, which is -1 where there is no child,
         and otherwise has the ID of the grid that resides there.
+
         """
         self._child_index_mask = na.zeros(self.ActiveDimensions, 'int32') - 1
         for child in self.Children:
@@ -410,10 +408,10 @@
         if self.__coords == None: self._generate_coords()
         return self.__coords
 
-    def _set_coords(self, newC):
+    def _set_coords(self, new_c):
         if self.__coords != None:
             mylog.warning("Overriding coords attribute!  This is probably unwise!")
-        self.__coords = newC
+        self.__coords = new_c
 
     def _del_coords(self):
         del self.__coords
@@ -421,12 +419,12 @@
 
     def _generate_coords(self):
         """
-        Creates self.coords, which is of dimensions (3,ActiveDimensions)
+        Creates self.coords, which is of dimensions (3, ActiveDimensions)
+
         """
-        #print "Generating coords"
         ind = na.indices(self.ActiveDimensions)
-        LE = na.reshape(self.LeftEdge,(3,1,1,1))
-        self['x'], self['y'], self['z'] = (ind+0.5)*self.dds+LE
+        left_shaped = na.reshape(self.LeftEdge, (3, 1, 1, 1))
+        self['x'], self['y'], self['z'] = (ind + 0.5) * self.dds + left_shaped
 
     child_mask = property(fget=_get_child_mask, fdel=_del_child_mask)
     child_index_mask = property(fget=_get_child_index_mask, fdel=_del_child_index_mask)
@@ -437,9 +435,10 @@
         # We will attempt this by creating a datacube that is exactly bigger
         # than the grid by nZones*dx in each direction
         nl = self.get_global_startindex() - n_zones
-        nr = nl + self.ActiveDimensions + 2*n_zones
+        nr = nl + self.ActiveDimensions + 2 * n_zones
         new_left_edge = nl * self.dds + self.pf.domain_left_edge
         new_right_edge = nr * self.dds + self.pf.domain_left_edge
+
         # Something different needs to be done for the root grid, though
         level = self.Level
         if all_levels:
@@ -452,32 +451,17 @@
         # those of this grid.
         kwargs.update(self.field_parameters)
         if smoothed:
-            #cube = self.hierarchy.smoothed_covering_grid(
-            #    level, new_left_edge, new_right_edge, **kwargs)
             cube = self.hierarchy.smoothed_covering_grid(
                 level, new_left_edge, **kwargs)
         else:
-            cube = self.hierarchy.covering_grid(
-                level, new_left_edge, **kwargs)
+            cube = self.hierarchy.covering_grid(level, new_left_edge, **kwargs)
+
         return cube
 
-    def get_vertex_centered_data(self, field, smoothed=True,
-                                 no_ghost=False):
-        if not no_ghost:
-            cg = self.retrieve_ghost_zones(1, field, smoothed=smoothed)
-            # We have two extra zones in every direction
-            new_field = na.zeros(self.ActiveDimensions + 1, dtype='float64')
-            na.add(new_field, cg[field][1: ,1: ,1: ], new_field)
-            na.add(new_field, cg[field][:-1,1: ,1: ], new_field)
-            na.add(new_field, cg[field][1: ,:-1,1: ], new_field)
-            na.add(new_field, cg[field][1: ,1: ,:-1], new_field)
-            na.add(new_field, cg[field][:-1,1: ,:-1], new_field)
-            na.add(new_field, cg[field][1: ,:-1,:-1], new_field)
-            na.add(new_field, cg[field][:-1,:-1,1: ], new_field)
-            na.add(new_field, cg[field][:-1,:-1,:-1], new_field)
-            na.multiply(new_field, 0.125, new_field)
-        else:
-            new_field = na.zeros(self.ActiveDimensions + 1, dtype='float64')
+    def get_vertex_centered_data(self, field, smoothed=True, no_ghost=False):
+        new_field = na.zeros(self.ActiveDimensions + 1, dtype='float64')
+
+        if no_ghost:
             of = self[field]
             new_field[:-1,:-1,:-1] += of
             new_field[:-1,:-1,1:] += of
@@ -493,13 +477,23 @@
 
             new_field[:,:, -1] = 2.0*new_field[:,:,-2] - new_field[:,:,-3]
             new_field[:,:, 0]  = 2.0*new_field[:,:,1] - new_field[:,:,2]
-
             new_field[:,-1, :] = 2.0*new_field[:,-2,:] - new_field[:,-3,:]
             new_field[:,0, :]  = 2.0*new_field[:,1,:] - new_field[:,2,:]
-
             new_field[-1,:,:] = 2.0*new_field[-2,:,:] - new_field[-3,:,:]
             new_field[0,:,:]  = 2.0*new_field[1,:,:] - new_field[2,:,:]
+
             if self.pf.field_info[field].take_log:
                 na.power(10.0, new_field, new_field)
+        else:
+            cg = self.retrieve_ghost_zones(1, field, smoothed=smoothed)
+            na.add(new_field, cg[field][1: ,1: ,1: ], new_field)
+            na.add(new_field, cg[field][:-1,1: ,1: ], new_field)
+            na.add(new_field, cg[field][1: ,:-1,1: ], new_field)
+            na.add(new_field, cg[field][1: ,1: ,:-1], new_field)
+            na.add(new_field, cg[field][:-1,1: ,:-1], new_field)
+            na.add(new_field, cg[field][1: ,:-1,:-1], new_field)
+            na.add(new_field, cg[field][:-1,:-1,1: ], new_field)
+            na.add(new_field, cg[field][:-1,:-1,:-1], new_field)
+            na.multiply(new_field, 0.125, new_field)
+
         return new_field
-
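
A minimal sketch of the reorganized vertex-centered data call on a single grid,
assuming a loaded parameter file pf; with no_ghost=True the edge values are
extrapolated rather than drawn from a smoothed covering grid:

    g = pf.h.grids[0]
    vc = g.get_vertex_centered_data("Density", no_ghost=True)
    # vc is vertex centered, so its shape is ActiveDimensions + 1 on each axis.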


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/data_objects/hierarchy.py
--- a/yt/data_objects/hierarchy.py
+++ b/yt/data_objects/hierarchy.py
@@ -35,12 +35,12 @@
 
 from yt.arraytypes import blankRecordArray
 from yt.config import ytcfg
+from yt.data_objects.field_info_container import NullFunc
 from yt.utilities.definitions import MAXLEVEL
 from yt.utilities.io_handler import io_registry
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface, parallel_splitter
-from object_finding_mixin import \
-    ObjectFindingMixin
+from object_finding_mixin import ObjectFindingMixin
 
 from .data_containers import data_object_registry
 
@@ -137,6 +137,32 @@
             self.proj = self.overlap_proj
         self.object_types.sort()
 
+    def _setup_unknown_fields(self):
+        known_fields = self.parameter_file._fieldinfo_known
+        for field in self.field_list:
+            # By allowing a backup, we don't mandate that it's found in our
+            # current field info.  This means we'll instead simply override
+            # it.
+            ff = self.parameter_file.field_info.pop(field, None)
+            if field not in known_fields:
+                rootloginfo("Adding unknown field %s to list of fields", field)
+                cf = None
+                if self.parameter_file.has_key(field):
+                    def external_wrapper(f):
+                        def _convert_function(data):
+                            return data.convert(f)
+                        return _convert_function
+                    cf = external_wrapper(field)
+                # Note that we call add_field on the field_info directly.  This
+                # will allow the same field detection mechanism to work for 1D, 2D
+                # and 3D fields.
+                self.pf.field_info.add_field(
+                        field, NullFunc,
+                        convert_function=cf, take_log=False, units=r"Unknown")
+            else:
+                mylog.debug("Adding known field %s to list of fields", field)
+                self.parameter_file.field_info[field] = known_fields[field]
+            
     # Now all the object related stuff
 
     def all_data(self, find_max=False):


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/data_objects/object_finding_mixin.py
--- a/yt/data_objects/object_finding_mixin.py
+++ b/yt/data_objects/object_finding_mixin.py
@@ -185,20 +185,21 @@
         return self.grids[grid_i], grid_i
 
     def get_periodic_box_grids(self, left_edge, right_edge):
-        left_edge = na.array(left_edge)
-        right_edge = na.array(right_edge)
         mask = na.zeros(self.grids.shape, dtype='bool')
         dl = self.parameter_file.domain_left_edge
         dr = self.parameter_file.domain_right_edge
+        left_edge = na.array(left_edge)
+        right_edge = na.array(right_edge)
+        dw = dr - dl
+        left_dist = left_edge - dl
         db = right_edge - left_edge
         for off_x in [-1, 0, 1]:
             nle = left_edge.copy()
-            nre = left_edge.copy()
-            nle[0] = dl[0] + (dr[0]-dl[0])*off_x + left_edge[0]
+            nle[0] = (dw[0]*off_x + dl[0]) + left_dist[0]
             for off_y in [-1, 0, 1]:
-                nle[1] = dl[1] + (dr[1]-dl[1])*off_y + left_edge[1]
+                nle[1] = (dw[1]*off_y + dl[1]) + left_dist[1]
                 for off_z in [-1, 0, 1]:
-                    nle[2] = dl[2] + (dr[2]-dl[2])*off_z + left_edge[2]
+                    nle[2] = (dw[2]*off_z + dl[2]) + left_dist[2]
                     nre = nle + db
                     g, gi = self.get_box_grids(nle, nre)
                     mask[gi] = True
@@ -215,20 +216,21 @@
         return self.grids[mask], na.where(mask)
 
     def get_periodic_box_grids_below_level(self, left_edge, right_edge, level):
-        left_edge = na.array(left_edge)
-        right_edge = na.array(right_edge)
         mask = na.zeros(self.grids.shape, dtype='bool')
         dl = self.parameter_file.domain_left_edge
         dr = self.parameter_file.domain_right_edge
+        left_edge = na.array(left_edge)
+        right_edge = na.array(right_edge)
+        dw = dr - dl
+        left_dist = left_edge - dl
         db = right_edge - left_edge
         for off_x in [-1, 0, 1]:
             nle = left_edge.copy()
-            nre = left_edge.copy()
-            nle[0] = dl[0] + (dr[0]-dl[0])*off_x + left_edge[0]
+            nle[0] = (dw[0]*off_x + dl[0]) + left_dist[0]
             for off_y in [-1, 0, 1]:
-                nle[1] = dl[1] + (dr[1]-dl[1])*off_y + left_edge[1]
+                nle[1] = (dw[1]*off_y + dl[1]) + left_dist[1]
                 for off_z in [-1, 0, 1]:
-                    nle[2] = dl[2] + (dr[2]-dl[2])*off_z + left_edge[2]
+                    nle[2] = (dw[2]*off_z + dl[2]) + left_dist[2]
                     nre = nle + db
                     g, gi = self.get_box_grids_below_level(nle, nre, level)
                     mask[gi] = True
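
A minimal sketch of the periodic box lookup refactored above, assuming a loaded
parameter file pf; the box is shifted through each periodic image of the domain
and all overlapping grids are collected:

    grids, indices = pf.h.get_periodic_box_grids([0.45, 0.45, 0.45],
                                                 [0.55, 0.55, 0.55])
    # grids is an array of grid objects; indices holds the na.where(mask) output.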


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -35,6 +35,8 @@
     ParameterFileStore, \
     NoParameterShelf, \
     output_type_registry
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc
 
 # We want to support the movie format in the future.
 # When such a thing comes to pass, I'll move all the stuff that is contant up
@@ -96,6 +98,8 @@
                 pass
         self.print_key_parameters()
 
+        self.create_field_info()
+
     def __reduce__(self):
         args = (self._hash(),)
         return (_reconstruct_pf, args)
@@ -189,6 +193,17 @@
                 v = getattr(self, a)
                 mylog.info("Parameters: %-25s = %s", a, v)
 
+    def create_field_info(self):
+        if getattr(self, "field_info", None) is None:
+            # The setting up of fields occurs in the hierarchy, which is only
+            # instantiated once.  So we have to double check to make sure that,
+            # in the event of double-loads of a parameter file, we do not blow
+            # away the exising field_info.
+            self.field_info = FieldInfoContainer.create_with_fallback(
+                                self._fieldinfo_fallback)
+
+        
+
 def _reconstruct_pf(*args, **kwargs):
     pfs = ParameterFileStore()
     pf = pfs.get_pf_hash(*args)
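
create_field_info() above builds the per-output field container at most once,
falling back to the frontend's _fieldinfo_fallback for anything not defined
locally. The real FieldInfoContainer lives in
yt/data_objects/field_info_container.py; the toy class below only sketches the
fallback lookup order implied by create_with_fallback(), with illustrative
names throughout:

    # Toy stand-in for the fallback-container idea; not the real yt class.
    class ToyFieldInfoContainer(dict):
        fallback = None

        @classmethod
        def create_with_fallback(cls, fallback):
            obj = cls()
            obj.fallback = fallback
            return obj

        def __missing__(self, key):
            # Anything not defined locally is looked up in the fallback.
            if self.fallback is not None:
                return self.fallback[key]
            raise KeyError(key)

    universal = ToyFieldInfoContainer(Density="universal definition")
    frontend = ToyFieldInfoContainer.create_with_fallback(universal)
    frontend["Temperature"] = "frontend-specific definition"

    print(frontend["Temperature"])  # defined on the frontend container
    print(frontend["Density"])      # falls through to the universal one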


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -34,7 +34,7 @@
 
 from yt.funcs import *
 
-from yt.utilities.amr_utils import CICDeposit_3
+from yt.utilities.amr_utils import CICDeposit_3, obtain_rvec
 from yt.utilities.cosmology import Cosmology
 from field_info_container import \
     add_field, \
@@ -139,88 +139,6 @@
 add_field("SoundSpeed", function=_SoundSpeed,
           units=r"\rm{cm}/\rm{s}")
 
-def particle_func(p_field, dtype='float64'):
-    def _Particles(field, data):
-        io = data.hierarchy.io
-        if not data.NumberOfParticles > 0:
-            return na.array([], dtype=dtype)
-        try:
-            return io._read_data_set(data, p_field).astype(dtype)
-        except io._read_exception:
-            pass
-        # This is bad.  But it's the best idea I have right now.
-        return data._read_data(p_field.replace("_"," ")).astype(dtype)
-    return _Particles
-for pf in ["type", "mass"] + \
-          ["position_%s" % ax for ax in 'xyz']:
-    pfunc = particle_func("particle_%s" % (pf))
-    add_field("particle_%s" % pf, function=pfunc,
-              validators = [ValidateSpatial(0)],
-              particle_type=True)
-
-def _convRetainInt(data):
-    return 1
-add_field("particle_index", function=particle_func("particle_index", "int64"),
-          validators = [ValidateSpatial(0)], particle_type=True,
-          convert_function=_convRetainInt)
-
-def _get_vel_convert(ax):
-    def _convert_p_vel(data):
-        return data.convert("%s-velocity" % ax)
-    return _convert_p_vel
-for ax in 'xyz':
-    pf = "particle_velocity_%s" % ax
-    pfunc = particle_func(pf)
-    cfunc = _get_vel_convert(ax)
-    add_field(pf, function=pfunc, convert_function=cfunc,
-              validators = [ValidateSpatial(0)],
-              particle_type=True)
-
-for pf in ["creation_time", "dynamical_time", "metallicity_fraction"]:
-    pfunc = particle_func(pf)
-    add_field(pf, function=pfunc,
-              validators = [ValidateSpatial(0),
-                            ValidateDataField(pf)],
-              particle_type=True)
-add_field("particle_mass", function=particle_func("particle_mass"),
-          validators=[ValidateSpatial(0)], particle_type=True)
-
-def _ParticleAge(field, data):
-    current_time = data.pf.current_time
-    return (current_time - data["creation_time"])
-def _convertParticleAge(data):
-    return data.convert("years")
-add_field("ParticleAge", function=_ParticleAge,
-          validators=[ValidateDataField("creation_time")],
-          particle_type=True, convert_function=_convertParticleAge)
-
-def _ParticleMass(field, data):
-    particles = data["particle_mass"].astype('float64') * \
-                just_one(data["CellVolumeCode"].ravel())
-    # Note that we mandate grid-type here, so this is okay
-    return particles
-
-def _convertParticleMass(data):
-    return data.convert("Density")*(data.convert("cm")**3.0)
-def _IOLevelParticleMass(grid):
-    dd = dict(particle_mass = na.ones(1), CellVolumeCode=grid["CellVolumeCode"])
-    cf = (_ParticleMass(None, dd) * _convertParticleMass(grid))[0]
-    return cf
-def _convertParticleMassMsun(data):
-    return data.convert("Density")*((data.convert("cm")**3.0)/1.989e33)
-def _IOLevelParticleMassMsun(grid):
-    dd = dict(particle_mass = na.ones(1), CellVolumeCode=grid["CellVolumeCode"])
-    cf = (_ParticleMass(None, dd) * _convertParticleMassMsun(grid))[0]
-    return cf
-add_field("ParticleMass",
-          function=_ParticleMass, validators=[ValidateSpatial(0)],
-          particle_type=True, convert_function=_convertParticleMass,
-          particle_convert_function=_IOLevelParticleMass)
-add_field("ParticleMassMsun",
-          function=_ParticleMass, validators=[ValidateSpatial(0)],
-          particle_type=True, convert_function=_convertParticleMassMsun,
-          particle_convert_function=_IOLevelParticleMassMsun)
-
 def _RadialMachNumber(field, data):
     """M{|v|/t_sound}"""
     return na.abs(data["RadialVelocity"]) / data["SoundSpeed"]
@@ -510,7 +428,7 @@
     return new_field2
 add_field("AveragedDensity",
           function=_AveragedDensity,
-          validators=[ValidateSpatial(1)])
+          validators=[ValidateSpatial(1, ["Density"])])
 
 def _DivV(field, data):
     # We need to set up stencils
@@ -566,13 +484,6 @@
     zv = data["z-velocity"] - bv[2]
     return xv, yv, zv
 
-def obtain_rvec(data):
-    center = data.get_field_parameter('center')
-    coords = na.array([data['x'],data['y'],data['z']], dtype='float64')
-    new_shape = tuple([3] + [1]*(len(coords.shape)-1))
-    r_vec = coords - na.reshape(center,new_shape)
-    return r_vec # axis 0 is the x,y,z
-
 def _SpecificAngularMomentum(field, data):
     """
     Calculate the angular velocity.  Returns a vector for each cell.
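
The obtain_rvec() removed above now comes from yt.utilities.amr_utils instead;
for reference, a pure-NumPy restatement of what the deleted helper computed
(the displacement of each point from the 'center' field parameter, with axis 0
holding the x, y, z components), applied to made-up coordinates:

    import numpy as np

    def obtain_rvec_py(coords, center):
        """Displacement of each point from center; axis 0 is x, y, z."""
        coords = np.asarray(coords, dtype="float64")
        new_shape = (3,) + (1,) * (coords.ndim - 1)
        return coords - np.reshape(center, new_shape)

    coords = np.array([[0.1, 0.5, 0.9],   # x of three points
                       [0.2, 0.5, 0.8],   # y
                       [0.3, 0.5, 0.7]])  # z
    print(obtain_rvec_py(coords, [0.5, 0.5, 0.5]))
    # [[-0.4  0.   0.4]
    #  [-0.3  0.   0.3]
    #  [-0.2  0.   0.2]]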


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/frontends/art/api.py
--- a/yt/frontends/art/api.py
+++ b/yt/frontends/art/api.py
@@ -34,7 +34,6 @@
       ARTStaticOutput
 
 from .fields import \
-      ARTFieldContainer, \
       ARTFieldInfo, \
       add_art_field
 


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -37,8 +37,10 @@
       AMRHierarchy
 from yt.data_objects.static_output import \
       StaticOutput
-from .fields import ARTFieldContainer
-from .fields import add_field
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc
+from .fields import \
+    ARTFieldInfo, add_art_field, KnownARTFields
 from yt.utilities.definitions import \
     mpc_conversion
 from yt.utilities.io_handler import \
@@ -113,7 +115,6 @@
     
     def __init__(self, pf, data_style='art'):
         self.data_style = data_style
-        self.field_info = ARTFieldContainer()
         self.parameter_file = weakref.proxy(pf)
         # for now, the hierarchy file is the parameter file!
         self.hierarchy_filename = self.parameter_file.parameter_filename
@@ -346,20 +347,6 @@
             g._setup_dx()
         self.max_level = self.grid_levels.max()
 
-    def _setup_unknown_fields(self):
-        for field in self.field_list:
-            if field in self.parameter_file.field_info: continue
-            mylog.info("Adding %s to list of fields", field)
-            cf = None
-            if self.parameter_file.has_key(field):
-                def external_wrapper(f):
-                    def _convert_function(data):
-                        return data.convert(f)
-                    return _convert_function
-                cf = external_wrapper(field)
-            add_field(field, lambda a, b: None,
-                      convert_function=cf, take_log=False)
-
     def _setup_derived_fields(self):
         self.derived_field_list = []
 
@@ -372,7 +359,8 @@
 
 class ARTStaticOutput(StaticOutput):
     _hierarchy_class = ARTHierarchy
-    _fieldinfo_class = ARTFieldContainer
+    _fieldinfo_fallback = ARTFieldInfo
+    _fieldinfo_known = KnownARTFields
     _handle = None
     
     def __init__(self, filename, data_style='art',
@@ -382,7 +370,6 @@
         StaticOutput.__init__(self, filename, data_style)
         self.storage_filename = storage_filename
         
-        self.field_info = self._fieldinfo_class()
         self.dimensionality = 3
         self.refine_by = 2
         self.parameters["HydroMethod"] = 'art'


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -24,7 +24,10 @@
 """
 
 from yt.data_objects.field_info_container import \
-    CodeFieldInfoContainer, \
+    FieldInfoContainer, \
+    FieldInfo, \
+    NullFunc, \
+    TranslationFunc, \
     ValidateParameter, \
     ValidateDataField, \
     ValidateProperty, \
@@ -34,15 +37,11 @@
 from yt.utilities.physical_constants import \
     boltzmann_constant_cgs, mass_hydrogen_cgs
 
-import pdb
+ARTFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = ARTFieldInfo.add_field
 
-class ARTFieldContainer(CodeFieldInfoContainer):
-    _shared_state = {}
-    _field_list = {}
-ARTFieldInfo = ARTFieldContainer()
-add_art_field = ARTFieldInfo.add_field
-
-add_field = add_art_field
+KnownARTFields = FieldInfoContainer()
+add_art_field = KnownARTFields.add_field
 
 translation_dict = {"Density":"density",
                     "TotalEnergy":"TotalEnergy",
@@ -54,33 +53,28 @@
                     "GasEnergy":"GasEnergy"
                    }
 
-def _generate_translation(mine, theirs):
-    add_field(theirs, function=lambda a, b: b[mine], take_log=True)
-
 for f,v in translation_dict.items():
-    if v not in ARTFieldInfo:
-        add_field(v, function=lambda a,b: None, take_log=False,
+    add_art_field(v, function=NullFunc, take_log=False,
                   validators = [ValidateDataField(v)])
-    #print "Setting up translator from %s to %s" % (v, f)
-    _generate_translation(v, f)
+    add_art_field(f, function=TranslationFunc(v), take_log=True)
 
 #def _convertMetallicity(data):
 #    return data.convert("Metal_Density1")
-#ARTFieldInfo["Metal_Density1"]._units = r"1"
-#ARTFieldInfo["Metal_Density1"]._projected_units = r"1"
-#ARTFieldInfo["Metal_Density1"]._convert_function=_convertMetallicity
+#KnownARTFields["Metal_Density1"]._units = r"1"
+#KnownARTFields["Metal_Density1"]._projected_units = r"1"
+#KnownARTFields["Metal_Density1"]._convert_function=_convertMetallicity
 
 
 def _convertDensity(data):
     return data.convert("Density")
-ARTFieldInfo["Density"]._units = r"\rm{g}/\rm{cm}^3"
-ARTFieldInfo["Density"]._projected_units = r"\rm{g}/\rm{cm}^2"
-ARTFieldInfo["Density"]._convert_function=_convertDensity
+KnownARTFields["Density"]._units = r"\rm{g}/\rm{cm}^3"
+KnownARTFields["Density"]._projected_units = r"\rm{g}/\rm{cm}^2"
+KnownARTFields["Density"]._convert_function=_convertDensity
 
 def _convertEnergy(data):
     return data.convert("GasEnergy")
-ARTFieldInfo["GasEnergy"]._units = r"\rm{ergs}/\rm{g}"
-ARTFieldInfo["GasEnergy"]._convert_function=_convertEnergy
+KnownARTFields["GasEnergy"]._units = r"\rm{ergs}/\rm{g}"
+KnownARTFields["GasEnergy"]._convert_function=_convertEnergy
 
 def _Temperature(field, data):
     tr  = data["GasEnergy"] / data["Density"]
@@ -89,9 +83,9 @@
     return tr
 def _convertTemperature(data):
     return data.convert("Temperature")
-add_field("Temperature", function=_Temperature, units = r"\mathrm{K}")
-ARTFieldInfo["Temperature"]._units = r"\mathrm{K}"
-ARTFieldInfo["Temperature"]._convert_function=_convertTemperature
+add_art_field("Temperature", function=_Temperature, units = r"\mathrm{K}")
+KnownARTFields["Temperature"]._units = r"\mathrm{K}"
+KnownARTFields["Temperature"]._convert_function=_convertTemperature
 
 def _MetallicitySNII(field, data):
     #get the dimensionless mass fraction
@@ -99,8 +93,8 @@
     tr *= data.pf.conversion_factors["Density"]    
     return tr
     
-add_field("MetallicitySNII", function=_MetallicitySNII, units = r"\mathrm{K}")
-ARTFieldInfo["MetallicitySNII"]._units = r"\mathrm{K}"
+add_art_field("MetallicitySNII", function=_MetallicitySNII, units = r"\mathrm{K}")
+KnownARTFields["MetallicitySNII"]._units = r"\mathrm{K}"
 
 def _MetallicitySNIa(field, data):
     #get the dimensionless mass fraction
@@ -108,8 +102,8 @@
     tr *= data.pf.conversion_factors["Density"]    
     return tr
     
-add_field("MetallicitySNIa", function=_MetallicitySNIa, units = r"\mathrm{K}")
-ARTFieldInfo["MetallicitySNIa"]._units = r"\mathrm{K}"
+add_art_field("MetallicitySNIa", function=_MetallicitySNIa, units = r"\mathrm{K}")
+KnownARTFields["MetallicitySNIa"]._units = r"\mathrm{K}"
 
 def _Metallicity(field, data):
     #get the dimensionless mass fraction of the total metals
@@ -118,14 +112,14 @@
     tr *= data.pf.conversion_factors["Density"]    
     return tr
     
-add_field("Metallicity", function=_Metallicity, units = r"\mathrm{K}")
-ARTFieldInfo["Metallicity"]._units = r"\mathrm{K}"
+add_art_field("Metallicity", function=_Metallicity, units = r"\mathrm{K}")
+KnownARTFields["Metallicity"]._units = r"\mathrm{K}"
 
 def _Metal_Density(field,data):
     return data["Metal_DensitySNII"]+data["Metal_DensitySNIa"]
 def _convert_Metal_Density(data):
     return data.convert("Metal_Density")
 
-add_field("Metal_Density", function=_Metal_Density, units = r"\mathrm{K}")
-ARTFieldInfo["Metal_Density"]._units = r"\mathrm{K}"
-ARTFieldInfo["Metal_Density"]._convert_function=_convert_Metal_Density
+add_art_field("Metal_Density", function=_Metal_Density, units = r"\mathrm{K}")
+KnownARTFields["Metal_Density"]._units = r"\mathrm{K}"
+KnownARTFields["Metal_Density"]._convert_function=_convert_Metal_Density


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/frontends/castro/api.py
--- a/yt/frontends/castro/api.py
+++ b/yt/frontends/castro/api.py
@@ -34,7 +34,6 @@
       CastroStaticOutput
 
 from .fields import \
-      CastroFieldContainer, \
       CastroFieldInfo, \
       add_castro_field
 


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/frontends/castro/data_structures.py
--- a/yt/frontends/castro/data_structures.py
+++ b/yt/frontends/castro/data_structures.py
@@ -27,27 +27,19 @@
 import os
 import weakref
 import itertools
+from collections import defaultdict
+from string import strip, rstrip
+from stat import ST_CTIME
+
 import numpy as na
 
-from collections import \
-    defaultdict
-from string import \
-    strip, \
-    rstrip
-from stat import \
-    ST_CTIME
-
 from yt.funcs import *
-from yt.data_objects.grid_patch import \
-           AMRGridPatch
-from yt.data_objects.hierarchy import \
-           AMRHierarchy
-from yt.data_objects.static_output import \
-           StaticOutput
-from yt.utilities.definitions import \
-    mpc_conversion
-from yt.utilities.amr_utils import \
-    get_box_grids_level
+from yt.data_objects.field_info_container import FieldInfoContainer, NullFunc
+from yt.data_objects.grid_patch import AMRGridPatch
+from yt.data_objects.hierarchy import AMRHierarchy
+from yt.data_objects.static_output import StaticOutput
+from yt.utilities.definitions import mpc_conversion
+from yt.utilities.amr_utils import get_box_grids_level
 
 from .definitions import \
     castro2enzoDict, \
@@ -56,39 +48,40 @@
     castro_FAB_header_pattern, \
     castro_particle_field_names, \
     boxlib_bool_to_int
-
 from .fields import \
-    CastroFieldContainer, \
-    add_field
+    CastroFieldInfo, \
+    KnownCastroFields, \
+    add_castro_field
 
 
 class CastroGrid(AMRGridPatch):
     _id_offset = 0
-    def __init__(self, LeftEdge, RightEdge, index, level, filename, offset, dimensions, start, stop, paranoia=False,**kwargs):
-        AMRGridPatch.__init__(self, index,**kwargs)
+
+    def __init__(self, LeftEdge, RightEdge, index, level, filename, offset,
+                 dimensions, start, stop, paranoia=False, **kwargs):
+        super(CastroGrid, self).__init__(index, **kwargs)
         self.filename = filename
         self._offset = offset
-        self._paranoid = paranoia
+        self._paranoid = paranoia  # TODO: Factor this behavior out in tests
 
-        # should error check this
+        ### TODO: error check this (test)
         self.ActiveDimensions = (dimensions.copy()).astype('int32')#.transpose()
         self.start_index = start.copy()#.transpose()
         self.stop_index = stop.copy()#.transpose()
         self.LeftEdge  = LeftEdge.copy()
         self.RightEdge = RightEdge.copy()
         self.index = index
-        self.Level = level
+        self.level = level
 
     def get_global_startindex(self):
         return self.start_index
 
     def _prepare_grid(self):
-        """
-        Copies all the appropriate attributes from the hierarchy
-        """
-        # This is definitely the slowest part of generating the hierarchy
+        """ Copies all the appropriate attributes from the hierarchy. """
+        # This is definitely the slowest part of generating the hierarchy.
         # Now we give it pointers to all of its attributes
         # Note that to keep in line with Enzo, we have broken PEP-8
+
         h = self.hierarchy # cache it
         #self.StartIndices = h.gridStartIndices[self.id]
         #self.EndIndices = h.gridEndIndices[self.id]
@@ -100,6 +93,7 @@
         self.field_indexes = h.field_indexes
         self.Children = h.gridTree[self.id]
         pIDs = h.gridReverseTree[self.id]
+
         if len(pIDs) > 0:
             self.Parent = [weakref.proxy(h.grids[pID]) for pID in pIDs]
         else:
@@ -115,6 +109,7 @@
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
             self.dds = na.array((RE-LE)/self.ActiveDimensions)
+
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
@@ -124,86 +119,90 @@
 
 class CastroHierarchy(AMRHierarchy):
     grid = CastroGrid
+
     def __init__(self, pf, data_style='castro_native'):
-        self.field_info = CastroFieldContainer()
+        super(CastroHierarchy, self).__init__(pf, data_style)
+
         self.field_indexes = {}
         self.parameter_file = weakref.proxy(pf)
         header_filename = os.path.join(pf.fullplotdir, 'Header')
         self.directory = pf.fullpath
         self.data_style = data_style
-        #self._setup_classes()
 
         # This also sets up the grid objects
-        self.read_global_header(header_filename, self.parameter_file.paranoid_read)
+        self.read_global_header(header_filename,
+                                self.parameter_file.paranoid_read) 
         self.read_particle_header()
-        self.__cache_endianness(self.levels[-1].grids[-1])
-        AMRHierarchy.__init__(self, pf, self.data_style)
+        self._cache_endianness(self.levels[-1].grids[-1])
         self._setup_data_io()
         self._setup_field_list()
         self._populate_hierarchy()
 
     def read_global_header(self, filename, paranoid_read):
-        """
-        read the global header file for an Castro plotfile output.
-        """
+        """ Read the global header file for an Castro plotfile output. """
         counter = 0
-        header_file = open(filename,'r')
-        self.__global_header_lines = header_file.readlines()
+        header_file = open(filename, 'r')
+        self._global_header_lines = header_file.readlines()
 
         # parse the file
-        self.castro_version = self.__global_header_lines[0].rstrip()
-        self.n_fields      = int(self.__global_header_lines[1])
+        self.castro_version = self._global_header_lines[0].rstrip()
+        self.n_fields = int(self._global_header_lines[1])
 
-        counter = self.n_fields+2
+        counter = self.n_fields + 2
         self.field_list = []
-        for i, line in enumerate(self.__global_header_lines[2:counter]):
+        for i, line in enumerate(self._global_header_lines[2:counter]):
             self.field_list.append(line.rstrip())
 
         # this is unused...eliminate it?
         #for f in self.field_indexes:
         #    self.field_list.append(castro2ytFieldsDict.get(f, f))
 
-        self.dimension = int(self.__global_header_lines[counter])
+        self.dimension = int(self._global_header_lines[counter])
         if self.dimension != 3:
             raise RunTimeError("Castro must be in 3D to use yt.")
+
         counter += 1
-        self.Time = float(self.__global_header_lines[counter])
+        self.Time = float(self._global_header_lines[counter])
         counter += 1
-        self.finest_grid_level = int(self.__global_header_lines[counter])
+        self.finest_grid_level = int(self._global_header_lines[counter])
         self.n_levels = self.finest_grid_level + 1
         counter += 1
+
         # quantities with _unnecessary are also stored in the inputs
         # file and are not needed.  they are read in and stored in
         # case in the future we want to enable a "backwards" way of
         # taking the data out of the Header file and using it to fill
         # in in the case of a missing inputs file
-        self.domainLeftEdge_unnecessary = na.array(map(float, self.__global_header_lines[counter].split()))
+        self.domainLeftEdge_unnecessary = na.array(map(float, self._global_header_lines[counter].split()))
         counter += 1
-        self.domainRightEdge_unnecessary = na.array(map(float, self.__global_header_lines[counter].split()))
+        self.domainRightEdge_unnecessary = na.array(map(float, self._global_header_lines[counter].split()))
         counter += 1
-        self.refinementFactor_unnecessary = self.__global_header_lines[counter].split() #na.array(map(int, self.__global_header_lines[counter].split()))
+        self.refinementFactor_unnecessary = self._global_header_lines[counter].split()
+        #na.array(map(int, self._global_header_lines[counter].split()))
         counter += 1
-        self.globalIndexSpace_unnecessary = self.__global_header_lines[counter]
-        #domain_re.search(self.__global_header_lines[counter]).groups()
+        self.globalIndexSpace_unnecessary = self._global_header_lines[counter]
+        #domain_re.search(self._global_header_lines[counter]).groups()
         counter += 1
-        self.timestepsPerLevel_unnecessary = self.__global_header_lines[counter]
+        self.timestepsPerLevel_unnecessary = self._global_header_lines[counter]
         counter += 1
-        self.dx = na.zeros((self.n_levels,3))
+
+        self.dx = na.zeros((self.n_levels, 3))
         for i, line in enumerate(self.__global_header_lines[counter:counter+self.n_levels]):
             self.dx[i] = na.array(map(float, line.split()))
         counter += self.n_levels
-        self.geometry = int(self.__global_header_lines[counter])
+        self.geometry = int(self._global_header_lines[counter])
         if self.geometry != 0:
             raise RunTimeError("yt only supports cartesian coordinates.")
         counter += 1
 
         # this is just to debug. eventually it should go away.
-        linebreak = int(self.__global_header_lines[counter])
+        linebreak = int(self._global_header_lines[counter])
         if linebreak != 0:
-            raise RunTimeError("INTERNAL ERROR! This should be a zero.")
+            raise RunTimeError("INTERNAL ERROR! Header is unexpected size")
         counter += 1
 
-        # each level is one group with ngrids on it. each grid has 3 lines of 2 reals
+        # Each level is one group with ngrids on it. Each grid has 3 lines of 2 reals.
+        # BoxLib madness
         self.levels = []
         grid_counter = 0
         file_finder_pattern = r"FabOnDisk: (\w+_D_[0-9]{4}) (\d+)\n"
@@ -214,45 +213,50 @@
         data_files_finder = re.compile(data_files_pattern)
 
         for level in range(0, self.n_levels):
-            tmp = self.__global_header_lines[counter].split()
-            # should this be grid_time or level_time??
+            tmp = self._global_header_lines[counter].split()
+            # Should this be grid_time or level_time??
             lev, ngrids, grid_time = int(tmp[0]), int(tmp[1]), float(tmp[2])
             counter += 1
-            nsteps = int(self.__global_header_lines[counter])
+            nsteps = int(self._global_header_lines[counter])
             counter += 1
             self.levels.append(CastroLevel(lev, ngrids))
-            # open level header, extract file names and offsets for
-            # each grid
-            # read slightly out of order here: at the end of the lo, hi
-            # pairs for x, y, z is a *list* of files types in the Level
-            # directory. each type has Header and a number of data
-            # files (one per processor)
+            # Open level header, extract file names and offsets for each grid.
+            # Read slightly out of order here: at the end of the lo, hi pairs
+            # for x, y, z is a *list* of file types in the Level directory.
+            # Each type has Header and a number of data files
+            # (one per processor)
             tmp_offset = counter + 3*ngrids
             nfiles = 0
             key_off = 0
             files =   {} # dict(map(lambda a: (a,[]), self.field_list))
             offsets = {} # dict(map(lambda a: (a,[]), self.field_list))
-            while nfiles+tmp_offset < len(self.__global_header_lines) and data_files_finder.match(self.__global_header_lines[nfiles+tmp_offset]):
-                filen = os.path.join(self.parameter_file.fullplotdir, \
-                                     self.__global_header_lines[nfiles+tmp_offset].strip())
+
+            while (nfiles + tmp_offset < len(self._global_header_lines) and
+                   data_files_finder.match(self._global_header_lines[nfiles+tmp_offset])):
+                filen = os.path.join(self.parameter_file.fullplotdir,
+                                     self._global_header_lines[nfiles+tmp_offset].strip())
                 # open each "_H" header file, and get the number of
                 # components within it
                 level_header_file = open(filen+'_H','r').read()
                 start_stop_index = re_dim_finder.findall(level_header_file) # just take the last one
                 grid_file_offset = re_file_finder.findall(level_header_file)
                 ncomp_this_file = int(level_header_file.split('\n')[2])
+
                 for i in range(ncomp_this_file):
                     key = self.field_list[i+key_off]
                     f, o = zip(*grid_file_offset)
                     files[key] = f
                     offsets[key] = o
                     self.field_indexes[key] = i
+
                 key_off += ncomp_this_file
                 nfiles += 1
+
             # convert dict of lists to list of dicts
             fn = []
             off = []
-            lead_path = os.path.join(self.parameter_file.fullplotdir,'Level_%i'%level)
+            lead_path = os.path.join(self.parameter_file.fullplotdir,
+                                     'Level_%i' % level)
             for i in range(ngrids):
                 fi = [os.path.join(lead_path, files[key][i]) for key in self.field_list]
                 of = [int(offsets[key][i]) for key in self.field_list]
@@ -262,21 +266,25 @@
             for grid in range(0, ngrids):
                 gfn = fn[grid]  # filename of file containing this grid
                 gfo = off[grid] # offset within that file
-                xlo, xhi = map(float, self.__global_header_lines[counter].split())
-                counter+=1
-                ylo, yhi = map(float, self.__global_header_lines[counter].split())
-                counter+=1
-                zlo, zhi = map(float, self.__global_header_lines[counter].split())
-                counter+=1
+                xlo, xhi = map(float, self._global_header_lines[counter].split())
+                counter += 1
+                ylo, yhi = map(float, self._global_header_lines[counter].split())
+                counter += 1
+                zlo, zhi = map(float, self._global_header_lines[counter].split())
+                counter += 1
                 lo = na.array([xlo, ylo, zlo])
                 hi = na.array([xhi, yhi, zhi])
-                dims, start, stop = self.__calculate_grid_dimensions(start_stop_index[grid])
-                self.levels[-1].grids.append(self.grid(lo, hi, grid_counter, level, gfn, gfo, dims, start, stop, paranoia=paranoid_read, hierarchy=self))
-                grid_counter += 1 # this is global, and shouldn't be reset
-                                  # for each level
+                dims, start, stop = self._calculate_grid_dimensions(start_stop_index[grid])
+                self.levels[-1].grids.append(self.grid(lo, hi, grid_counter,
+                                                       level, gfn, gfo, dims,
+                                                       start, stop,
+                                                       paranoia=paranoid_read,  ### TODO: at least the code isn't schizophrenic paranoid
+                                                       hierarchy=self))
+                grid_counter += 1   # this is global, and shouldn't be reset
+                                    # for each level
 
             # already read the filenames above...
-            counter+=nfiles
+            counter += nfiles
             self.num_grids = grid_counter
             self.float_type = 'float64'
 
@@ -289,53 +297,55 @@
         if not self.parameter_file.use_particles:
             self.pgrid_info = na.zeros((self.num_grids, 3), dtype='int64')
             return
+
         self.field_list += castro_particle_field_names[:]
-        header = open(os.path.join(self.parameter_file.fullplotdir,
-                        "DM", "Header"))
+        header = open(os.path.join(self.parameter_file.fullplotdir, "DM",
+                                   "Header"))
         version = header.readline()
         ndim = header.readline()
         nfields = header.readline()
         ntotalpart = int(header.readline())
         dummy = header.readline() # nextid
         maxlevel = int(header.readline()) # max level
+
         # Skip over how many grids on each level; this is degenerate
         for i in range(maxlevel+1): dummy = header.readline()
         grid_info = na.fromiter((int(i)
-                    for line in header.readlines()
-                    for i in line.split()
-                    ),
-            dtype='int64', count=3*self.num_grids).reshape((self.num_grids, 3))
+                                 for line in header.readlines()
+                                 for i in line.split()),
+                                dtype='int64',
+                                count=3*self.num_grids).reshape((self.num_grids, 3))
         self.pgrid_info = grid_info
 
-    def __cache_endianness(self, test_grid):
+    def _cache_endianness(self, test_grid):
         """
-        Cache the endianness and bytes perreal of the grids by using a
-        test grid and assuming that all grids have the same
-        endianness. This is a pretty safe assumption since Castro uses
-        one file per processor, and if you're running on a cluster
-        with different endian processors, then you're on your own!
+        Cache the endianness and bytes per real of the grids by using a test grid
+        and assuming that all grids have the same endianness. This is a pretty
+        safe assumption since Castro uses one file per processor, and if you're
+        running on a cluster with different endian processors, then you're on
+        your own!
+
         """
-        # open the test file & grab the header
-        inFile = open(os.path.expanduser(test_grid.filename[self.field_list[0]]),'rb')
-        header = inFile.readline()
-        inFile.close()
+        # open the test file and grab the header
+        in_file = open(os.path.expanduser(test_grid.filename[self.field_list[0]]), 'rb')
+        header = in_file.readline()
+        in_file.close()
         header.strip()
-
-        # parse it. the patter is in CastroDefs.py
-        headerRe = re.compile(castro_FAB_header_pattern)
-        bytesPerReal, endian, start, stop, centerType, nComponents = headerRe.search(header).groups()
-        self._bytesPerReal = int(bytesPerReal)
-        if self._bytesPerReal == int(endian[0]):
+        # Parse it. The pattern is in castro.definitions.py
+        header_re = re.compile(castro_FAB_header_pattern)
+        bytes_per_real, endian, start, stop, centerType, n_components = header_re.search(header).groups()
+        self._bytes_per_real = int(bytes_per_real)
+        if self._bytes_per_real == int(endian[0]):
             dtype = '<'
-        elif self._bytesPerReal == int(endian[-1]):
+        elif self._bytes_per_real == int(endian[-1]):
             dtype = '>'
         else:
             raise ValueError("FAB header is neither big nor little endian. Perhaps the file is corrupt?")
 
-        dtype += ('f%i' % self._bytesPerReal) # always a floating point
+        dtype += ('f%i' % self._bytes_per_real) # always a floating point
         self._dtype = dtype
 
-    def __calculate_grid_dimensions(self, start_stop):
+    def _calculate_grid_dimensions(self, start_stop):
         start = na.array(map(int, start_stop[0].split(',')))
         stop = na.array(map(int, start_stop[1].split(',')))
         dimension = stop - start + 1
@@ -343,21 +353,28 @@
 
     def _populate_grid_objects(self):
         mylog.debug("Creating grid objects")
+
         self.grids = na.concatenate([level.grids for level in self.levels])
         basedir = self.parameter_file.fullplotdir
+
         for g, pg in itertools.izip(self.grids, self.pgrid_info):
             g.particle_filename = os.path.join(
                 basedir, "DM", "Level_%s" % (g.Level), "DATA_%04i" % pg[0])
             g.NumberOfParticles = pg[1]
             g._particle_offset = pg[2]
+
         self.grid_particle_count[:,0] = self.pgrid_info[:,1]
         del self.pgrid_info
-        gls = na.concatenate([level.ngrids*[level.level] for level in self.levels])
+
+        gls = na.concatenate([level.ngrids * [level.level] for level in self.levels])
         self.grid_levels[:] = gls.reshape((self.num_grids,1))
-        grid_dcs = na.concatenate([level.ngrids*[self.dx[level.level]] for level in self.levels], axis=0)
+        grid_dcs = na.concatenate([level.ngrids * [self.dx[level.level]]
+                                  for level in self.levels], axis=0)
+
         self.grid_dxs = grid_dcs[:,0].reshape((self.num_grids,1))
         self.grid_dys = grid_dcs[:,1].reshape((self.num_grids,1))
         self.grid_dzs = grid_dcs[:,2].reshape((self.num_grids,1))
+
         left_edges = []
         right_edges = []
         dims = []
@@ -365,23 +382,28 @@
             left_edges += [g.LeftEdge for g in level.grids]
             right_edges += [g.RightEdge for g in level.grids]
             dims += [g.ActiveDimensions for g in level.grids]
+
         self.grid_left_edge = na.array(left_edges)
         self.grid_right_edge = na.array(right_edges)
         self.grid_dimensions = na.array(dims)
         self.gridReverseTree = [] * self.num_grids
         self.gridReverseTree = [ [] for i in range(self.num_grids)]
         self.gridTree = [ [] for i in range(self.num_grids)]
+
         mylog.debug("Done creating grid objects")
 
     def _populate_hierarchy(self):
-        self.__setup_grid_tree()
+        self._setup_grid_tree()
         #self._setup_grid_corners()
+
         for i, grid in enumerate(self.grids):
-            if (i%1e4) == 0: mylog.debug("Prepared % 7i / % 7i grids", i, self.num_grids)
+            if (i % 1e4) == 0:
+                mylog.debug("Prepared % 7i / % 7i grids", i, self.num_grids)
+
             grid._prepare_grid()
             grid._setup_dx()
 
-    def __setup_grid_tree(self):
+    def _setup_grid_tree(self):
         mask = na.empty(self.grids.size, dtype='int32')
         for i, grid in enumerate(self.grids):
             get_box_grids_level(grid.LeftEdge, grid.RightEdge, grid.Level + 1,
@@ -409,16 +431,20 @@
 
     def _setup_field_list(self):
         self.derived_field_list = []
+
         for field in self.field_info:
             try:
-                fd = self.field_info[field].get_dependencies(pf = self.parameter_file)
+                fd = self.field_info[field].get_dependencies(pf=self.parameter_file)
             except:
                 continue
+
             available = na.all([f in self.field_list for f in fd.requested])
             if available: self.derived_field_list.append(field)
+
         for field in self.field_list:
             if field not in self.derived_field_list:
                 self.derived_field_list.append(field)
+
         if self.parameter_file.use_particles:
             # We know which particle fields will exist -- pending further
             # changes in the future.
@@ -428,16 +454,18 @@
                         return data.convert(f)
                     return _convert_function
                 cf = external_wrapper(field)
-                # Note that we call add_field on the field_info directly.  This
+                # Note that we call add_castro_field on the field_info directly.  This
                 # will allow the same field detection mechanism to work for 1D, 2D
                 # and 3D fields.
-                self.pf.field_info.add_field(
+                self.pf.field_info.add_castro_field(
                         field, lambda a, b: None,
                         convert_function=cf, take_log=False,
                         particle_type=True)
 
+    ### TODO: check if this can be removed completely
     def _count_grids(self):
-        """this is already provided in
+        """
+        this is already provided in ???
 
         """
         pass
@@ -456,21 +484,6 @@
     def _detect_fields(self):
         pass
 
-    def _setup_unknown_fields(self):
-        for field in self.field_list:
-            if field in self.parameter_file.field_info: continue
-            mylog.info("Adding %s to list of fields", field)
-            cf = None
-            if self.parameter_file.has_key(field):
-                def external_wrapper(f):
-                    def _convert_function(data):
-                        return data.convert(f)
-                    return _convert_function
-                cf = external_wrapper(field)
-            add_field(field, lambda a, b: None,
-                      convert_function=cf, take_log=False)
-
-
     def _setup_derived_fields(self):
         pass
 
@@ -489,19 +502,21 @@
         self.ngrids = ngrids
         self.grids = []
 
-
 class CastroStaticOutput(StaticOutput):
     """
-    This class is a stripped down class that simply reads and parses
-    *filename*, without looking at the Castro hierarchy.
+    This class is a stripped down class that simply reads and parses *filename*,
+    without looking at the Castro hierarchy.
+
     """
     _hierarchy_class = CastroHierarchy
-    _fieldinfo_class = CastroFieldContainer
+    _fieldinfo_fallback = CastroFieldInfo
+    _fieldinfo_known = KnownCastroFields
 
     def __init__(self, plotname, paramFilename=None, fparamFilename=None,
                  data_style='castro_native', paranoia=False,
                  storage_filename = None):
-        """need to override for Castro file structure.
+        """
+        Need to override for Castro file structure.
 
         the paramfile is usually called "inputs"
         and there may be a fortran inputs file usually called "probin"
@@ -512,6 +527,8 @@
          * ASCII (not implemented in yt)
 
         """
+        super(CastroStaticOutput, self).__init__(plotname.rstrip("/"),
+                                                 data_style='castro_native')
         self.storage_filename = storage_filename
         self.paranoid_read = paranoia
         self.parameter_filename = paramFilename
@@ -520,13 +537,10 @@
 
         self.fparameters = {}
 
-        StaticOutput.__init__(self, plotname.rstrip("/"),
-                              data_style='castro_native')
-        self.field_info = self._fieldinfo_class()
-
         # These should maybe not be hardcoded?
+        ### TODO: this.
         self.parameters["HydroMethod"] = 'castro' # always PPM DE
-        self.parameters["Time"] = 1. # default unit is 1...
+        self.parameters["Time"] = 1.0 # default unit is 1...
         self.parameters["DualEnergyFormalism"] = 0 # always off.
         self.parameters["EOSType"] = -1 # default
 
@@ -543,13 +557,17 @@
         # fill our args
         pname = args[0].rstrip("/")
         dn = os.path.dirname(pname)
-        if len(args) > 1: kwargs['paramFilename'] = args[1]
+        if len(args) > 1:
+            kwargs['paramFilename'] = args[1]
+
         pfname = kwargs.get("paramFilename", os.path.join(dn, "inputs"))
 
         # We check for the job_info file's existence because this is currently
         # what distinguishes Castro data from MAESTRO data.
+        ### ^ that is nuts
         pfn = os.path.join(pfname)
-        if not os.path.exists(pfn): return False
+        if not os.path.exists(pfn):
+            return False
         castro = any(("castro." in line for line in open(pfn)))
         nyx = any(("nyx." in line for line in open(pfn)))
         castro = castro and (not nyx) # it's only castro if it's not nyx
@@ -559,35 +577,37 @@
 
     def _parse_parameter_file(self):
         """
-        Parses the parameter file and establishes the various
-        dictionaries.
+        Parses the parameter file and establishes the various dictionaries.
+
         """
+        # Boxlib madness
         self.fullplotdir = os.path.abspath(self.parameter_filename)
         self._parse_header_file()
-        self.parameter_filename = self._localize(
-                self.__ipfn, 'inputs')
-        self.fparameter_filename = self._localize(
-                self.fparameter_filename, 'probin')
+        self.parameter_filename = self._localize(self.__ipfn, 'inputs')
+        self.fparameter_filename = self._localize(self.fparameter_filename, 'probin')
         if os.path.isfile(self.fparameter_filename):
             self._parse_fparameter_file()
             for param in self.fparameters:
                 if castro2enzoDict.has_key(param):
-                    self.parameters[castro2enzoDict[param]]=self.fparameters[param]
+                    self.parameters[castro2enzoDict[param]] = self.fparameters[param]
+
         # Let's read the file
-        self.unique_identifier = \
-            int(os.stat(self.parameter_filename)[ST_CTIME])
+        self.unique_identifier = int(os.stat(self.parameter_filename)[ST_CTIME])
         lines = open(self.parameter_filename).readlines()
         self.use_particles = False
-        for lineI, line in enumerate(lines):
+
+        for line in lines:
             if line.find("#") >= 1: # Keep the commented lines...
-                line=line[:line.find("#")]
-            line=line.strip().rstrip()
+                line = line[:line.find("#")]
+            line = line.strip().rstrip()
             if len(line) < 2 or line.find("#") == 0: # ...but skip comments
                 continue
+
             try:
                 param, vals = map(strip, map(rstrip, line.split("=")))
             except ValueError:
                 mylog.error("ValueError: '%s'", line)
+
             if castro2enzoDict.has_key(param):
                 paramName = castro2enzoDict[param]
                 t = map(parameterDict[paramName], vals.split())
@@ -598,13 +618,10 @@
                         self.parameters[paramName] = t[0]
                     else:
                         self.parameters[paramName] = t
-
             elif param.startswith("geometry.prob_hi"):
-                self.domain_right_edge = \
-                    na.array([float(i) for i in vals.split()])
+                self.domain_right_edge = na.array([float(i) for i in vals.split()])
             elif param.startswith("geometry.prob_lo"):
-                self.domain_left_edge = \
-                    na.array([float(i) for i in vals.split()])
+                self.domain_left_edge = na.array([float(i) for i in vals.split()])
             elif param.startswith("particles.write_in_plotfile"):
                 self.use_particles = boxlib_bool_to_int(vals)
 
@@ -613,33 +630,38 @@
         self.domain_dimensions = self.parameters["TopGridDimensions"]
         self.refine_by = self.parameters.get("RefineBy", 2)
 
-        if self.parameters.has_key("ComovingCoordinates") and bool(self.parameters["ComovingCoordinates"]):
+        if (self.parameters.has_key("ComovingCoordinates") and
+            bool(self.parameters["ComovingCoordinates"])):
             self.cosmological_simulation = 1
             self.omega_lambda = self.parameters["CosmologyOmegaLambdaNow"]
             self.omega_matter = self.parameters["CosmologyOmegaMatterNow"]
             self.hubble_constant = self.parameters["CosmologyHubbleConstantNow"]
-            a_file = open(os.path.join(self.fullplotdir,'comoving_a'))
+
+            # Stupid that we have to read a separate file for this :/
+            a_file = open(os.path.join(self.fullplotdir, "comoving_a"))
             line = a_file.readline().strip()
             a_file.close()
-            self.parameters["CosmologyCurrentRedshift"] = 1/float(line) - 1
+
+            self.parameters["CosmologyCurrentRedshift"] = 1 / float(line) - 1
             self.cosmological_scale_factor = float(line)
             self.current_redshift = self.parameters["CosmologyCurrentRedshift"]
         else:
+            ### TODO: make these defaults automatic
             self.current_redshift = self.omega_lambda = self.omega_matter = \
                 self.hubble_constant = self.cosmological_simulation = 0.0
 
     def _parse_fparameter_file(self):
         """
-        Parses the fortran parameter file for Castro. Most of this will
-        be useless, but this is where it keeps mu = mass per
-        particle/m_hydrogen.
+        Parses the fortran parameter file for Castro. Most of this will be
+        useless, but this is where it keeps mu = mass per particle/m_hydrogen.
+
         """
         lines = open(self.fparameter_filename).readlines()
         for line in lines:
             if line.count("=") == 1:
                 param, vals = map(strip, map(rstrip, line.split("=")))
                 if vals.count("'") == 0:
-                    t = map(float,[a.replace('D','e').replace('d','e') for a in vals.split()]) # all are floating point.
+                    t = map(float, [a.replace('D','e').replace('d','e') for a in vals.split()]) # all are floating point.
                 else:
                     t = vals.split()
                 if len(t) == 1:
@@ -649,36 +671,39 @@
 
     def _parse_header_file(self):
         """
-        Parses the BoxLib header file to get any parameters stored
-        there. Hierarchy information is read out of this file in
-        CastroHierarchy.
+        Parses the BoxLib header file to get any parameters stored there.
+        Hierarchy information is read out of this file in CastroHierarchy. 
 
         Currently, only Time is read here.
+
         """
-        header_file = open(os.path.join(self.fullplotdir,'Header'))
+        header_file = open(os.path.join(self.fullplotdir, "Header"))
         lines = header_file.readlines()
         header_file.close()
         n_fields = int(lines[1])
-        self.current_time = float(lines[3+n_fields])
-
-
+        self.current_time = float(lines[3 + n_fields])
 
     def _set_units(self):
         """
-        Generates the conversion to various physical _units based on the parameter file
+        Generates the conversion to various physical _units based on the
+        parameter file.
+
         """
         self.units = {}
         self.time_units = {}
+
         if len(self.parameters) == 0:
             self._parse_parameter_file()
+
         if self.cosmological_simulation:
-            cf = 1e5*(self.cosmological_scale_factor)
+            cf = 1e5 * self.cosmological_scale_factor   # Where does the 1e5 come from?
             for ax in 'xyz':
                 self.units['particle_velocity_%s' % ax] = cf
-            self.units['particle_mass'] = 1.989e33
+            self.units['particle_mass'] = 1.989e33  ### TODO: Make a global solar mass def
+
         mylog.warning("Setting 1.0 in code units to be 1.0 cm")
         if not self.has_key("TimeUnits"):
-            mylog.warning("No time units.  Setting 1.0 = 1 second.")
+            mylog.warning("No time units. Setting 1.0 = 1 second.")
             self.conversion_factors["Time"] = 1.0
         for unit in mpc_conversion.keys():
             self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
@@ -688,8 +713,8 @@
         self.units['1'] = 1.0
         self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
         seconds = 1 #self["Time"]
-        self.time_units['years'] = seconds / (365*3600*24.0)
-        self.time_units['days']  = seconds / (3600*24.0)
+        self.time_units['years'] = seconds / (365 * 3600 * 24.0)
+        self.time_units['days']  = seconds / (3600 * 24.0)
         for key in yt2castroFieldsDict:
             self.conversion_factors[key] = 1.0
         for key in castro_particle_field_names:
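
_cache_endianness() above decides the on-disk float dtype by comparing the FAB
header's bytes-per-real value against the first and last digits of its
byte-ordering string: a match on the first digit means little endian ('<'), a
match on the last means big endian ('>'). A minimal restatement of that
decision, with hypothetical ordering strings (the actual
castro_FAB_header_pattern regex is not reproduced here):

    import numpy as np

    def fab_dtype(bytes_per_real, ordering):
        """Mirror the dtype choice made in _cache_endianness()."""
        if bytes_per_real == int(ordering[0]):
            byte_order = "<"  # little endian
        elif bytes_per_real == int(ordering[-1]):
            byte_order = ">"  # big endian
        else:
            raise ValueError("neither big nor little endian")
        return np.dtype("%sf%i" % (byte_order, bytes_per_real))

    # Hypothetical 8-byte ordering strings, for illustration only.
    print(fab_dtype(8, "87654321").str)  # '<f8'
    print(fab_dtype(8, "12345678").str)  # '>f8'

Relatedly, _parse_fparameter_file() converts Fortran 'd'/'D' exponents before
calling float():

    vals = "6.0d22 1.0D-3 2.5"
    print([float(a.replace('D', 'e').replace('d', 'e')) for a in vals.split()])
    # [6e+22, 0.001, 2.5]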


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/frontends/castro/fields.py
--- a/yt/frontends/castro/fields.py
+++ b/yt/frontends/castro/fields.py
@@ -21,106 +21,99 @@
 
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
 """
-from yt.utilities.physical_constants import \
-    mh, kboltz
+
 from yt.data_objects.field_info_container import \
-    CodeFieldInfoContainer, \
+    FieldInfoContainer, \
+    FieldInfo, \
+    NullFunc, \
+    TranslationFunc, \
     ValidateParameter, \
     ValidateDataField, \
     ValidateProperty, \
     ValidateSpatial, \
     ValidateGridType
 import yt.data_objects.universal_fields
+from yt.utilities.physical_constants import mh, kboltz
 
-class CastroFieldContainer(CodeFieldInfoContainer):
-    """
-    All Castro-specific fields are stored in here.
-    """
-    _shared_state = {}
-    _field_list = {}
-CastroFieldInfo = CastroFieldContainer()
-add_castro_field = CastroFieldInfo.add_field
+translation_dict = {
+    "x-velocity": "xvel",
+    "y-velocity": "yvel",
+    "z-velocity": "zvel",
+    "Density": "density",
+    "Total_Energy": "eden",
+    "Temperature": "temperature",
+    "x-momentum": "xmom",
+    "y-momentum": "ymom",
+    "z-momentum": "zmom"
+}
 
+# Set up containers for fields possibly in the output files
+KnownCastroFields = FieldInfoContainer()
+add_castro_field = KnownCastroFields.add_field
 
-add_field = add_castro_field
+# and containers for fields that are always derived
+CastroFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = CastroFieldInfo.add_field
 
-# def _convertDensity(data):
-#     return data.convert("Density")
-add_field("density", function=lambda a, b: None, take_log=True,
-          validators = [ValidateDataField("density")],
-          units=r"\rm{g}/\rm{cm}^3")
-CastroFieldInfo["density"]._projected_units =r"\rm{g}/\rm{cm}^2"
-#CastroFieldInfo["density"]._convert_function=_convertDensity
+# Start adding fields
+add_castro_field("density", function=NullFunc, take_log=True,
+                 units=r"\rm{g}/\rm{cm}^3")
 
-add_field("eden", function=lambda a, b: None, take_log=True,
-          validators = [ValidateDataField("eden")],
-          units=r"\rm{erg}/\rm{cm}^3")
+# fix projected units
+KnownCastroFields["density"]._projected_units = r"\rm{g}/\rm{cm}^2"
 
-add_field("xmom", function=lambda a, b: None, take_log=False,
-          validators = [ValidateDataField("xmom")],
-          units=r"\rm{g}/\rm{cm^2\ s}")
+add_castro_field("eden", function=NullFunc, take_log=True,
+                 validators = [ValidateDataField("eden")],
+                 units=r"\rm{erg}/\rm{cm}^3")
 
-add_field("ymom", function=lambda a, b: None, take_log=False,
-          validators = [ValidateDataField("ymom")],
-          units=r"\rm{gm}/\rm{cm^2\ s}")
+add_castro_field("xmom", function=NullFunc, take_log=False,
+                 validators = [ValidateDataField("xmom")],
+                 units=r"\rm{g}/\rm{cm^2\ s}")
 
-add_field("zmom", function=lambda a, b: None, take_log=False,
-          validators = [ValidateDataField("zmom")],
-          units=r"\rm{g}/\rm{cm^2\ s}")
+add_castro_field("ymom", function=NullFunc, take_log=False,
+                 validators = [ValidateDataField("ymom")],
+                 units=r"\rm{gm}/\rm{cm^2\ s}")
 
-translation_dict = {"x-velocity": "xvel",
-                    "y-velocity": "yvel",
-                    "z-velocity": "zvel",
-                    "Density": "density",
-                    "Total_Energy": "eden",
-                    "Temperature": "temperature",
-                    "x-momentum": "xmom",
-                    "y-momentum": "ymom",
-                    "z-momentum": "zmom"
-                   }
+add_castro_field("zmom", function=NullFunc, take_log=False,
+                 validators = [ValidateDataField("zmom")],
+                 units=r"\rm{g}/\rm{cm^2\ s}")
 
-def _generate_translation(mine, theirs):
-    add_field(theirs, function=lambda a, b: b[mine], take_log=True)
+# Now populate derived fields
+for mine, theirs in translation_dict.items():
+    if KnownCastroFields.has_key(theirs):
+        add_field(theirs, function=TranslationFunc(mine),
+                  take_log=KnownCastroFields[theirs].take_log)
 
-for f, v in translation_dict.items():
-    if v not in CastroFieldInfo:
-        add_field(v, function=lambda a, b: None, take_log=False,
-                  validators = [ValidateDataField(v)])
-    #print "Setting up translator from %s to %s" % (v, f)
-    _generate_translation(v, f)
+# Now fallbacks, in case these fields are not output
+def _xVelocity(field, data):
+    """ Generate x-velocity from x-momentum and density. """
+    return data["xmom"] / data["density"]
 
-def _xVelocity(field, data):
-    """generate x-velocity from x-momentum and density
-
-    """
-    return data["xmom"]/data["density"]
 add_field("x-velocity", function=_xVelocity, take_log=False,
           units=r'\rm{cm}/\rm{s}')
 
 def _yVelocity(field, data):
-    """generate y-velocity from y-momentum and density
+    """ Generate y-velocity from y-momentum and density. """
+    return data["ymom"] / data["density"]
 
-    """
-    #try:
-    #    return data["xvel"]
-    #except KeyError:
-    return data["ymom"]/data["density"]
 add_field("y-velocity", function=_yVelocity, take_log=False,
           units=r'\rm{cm}/\rm{s}')
 
 def _zVelocity(field, data):
-    """generate z-velocity from z-momentum and density
+    """ Generate z-velocity from z-momentum and density. """
+    return data["zmom"] / data["density"]
 
-    """
-    return data["zmom"]/data["density"]
 add_field("z-velocity", function=_zVelocity, take_log=False,
           units=r'\rm{cm}/\rm{s}')
 
 def _ThermalEnergy(field, data):
-    """generate thermal (gas energy). Dual Energy Formalism was
-        implemented by Stella, but this isn't how it's called, so I'll
-        leave that commented out for now.
+    """
+    Generate thermal (gas) energy. Dual Energy Formalism was implemented by
+    Stella, but this isn't how it's called, so I'll leave that commented out for
+    now.
+
     """
     #if data.pf["DualEnergyFormalism"]:
     #    return data["Gas_Energy"]
@@ -129,26 +122,59 @@
         data["x-velocity"]**2.0
         + data["y-velocity"]**2.0
         + data["z-velocity"]**2.0 )
+
 add_field("ThermalEnergy", function=_ThermalEnergy,
           units=r"\rm{ergs}/\rm{cm^3}")
 
 def _Pressure(field, data):
-    """M{(Gamma-1.0)*e, where e is thermal energy density
-       NB: this will need to be modified for radiation
     """
-    return (data.pf["Gamma"] - 1.0)*data["ThermalEnergy"]
+    M{(Gamma-1.0)*e, where e is thermal energy density
+    
+    NB: this will need to be modified for radiation
+
+    """
+    return (data.pf["Gamma"] - 1.0) * data["ThermalEnergy"]
+
 add_field("Pressure", function=_Pressure, units=r"\rm{dyne}/\rm{cm}^{2}")
 
 def _Temperature(field, data):
-    return (data.pf["Gamma"]-1.0)*data.pf["mu"]*mh*data["ThermalEnergy"]/(kboltz*data["Density"])
-add_field("Temperature", function=_Temperature, units=r"\rm{Kelvin}", take_log=False)
+    return ((data.pf["Gamma"] - 1.0) * data.pf["mu"] * mh *
+            data["ThermalEnergy"] / (kboltz * data["Density"]))
+
+add_field("Temperature", function=_Temperature, units=r"\rm{Kelvin}",
+          take_log=False)
 
 def _convertParticleMassMsun(data):
-    return 1.0/1.989e33
+    return 1.0 / 1.989e33
 def _ParticleMassMsun(field, data):
     return data["particle_mass"]
+
 add_field("ParticleMassMsun",
           function=_ParticleMassMsun, validators=[ValidateSpatial(0)],
           particle_type=True, convert_function=_convertParticleMassMsun,
           particle_convert_function=_ParticleMassMsun)
 
+# Fundamental fields that are usually/always output:
+#   density
+#   xmom
+#   ymom
+#   zmom
+#   rho_E
+#   rho_e
+#   Temp
+#
+# "Derived" fields that are sometimes output:
+#   x_velocity
+#   y_velocity
+#   z_velocity
+#   magvel
+#   grav_x
+#   grav_y
+#   grav_z
+#   maggrav
+#   magvort
+#   pressure
+#   entropy
+#   divu
+#   eint_e (e as derived from the "rho e" variable)
+#   eint_E (e as derived from the "rho E" variable)


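For reference, the Temperature fallback above is just the ideal-gas relation T = (Gamma - 1) * mu * m_H * e / (k_B * rho), with e the thermal energy density. A quick numerical sanity check of that formula, with made-up CGS values (nothing here is read from an actual Castro output):

    # Illustrative only: ideal-gas temperature from thermal energy density.
    mh = 1.6726e-24        # proton mass [g]
    kboltz = 1.3807e-16    # Boltzmann constant [erg/K]
    gamma, mu = 5.0 / 3.0, 0.6
    rho = 1.0e-24          # density [g/cm^3], arbitrary sample value
    e_thermal = 1.0e-12    # thermal energy density [erg/cm^3], arbitrary
    T = (gamma - 1.0) * mu * mh * e_thermal / (kboltz * rho)
    print(T)               # ~ 4.8e3 Kelvin for these numbers
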
diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/frontends/chombo/api.py
--- a/yt/frontends/chombo/api.py
+++ b/yt/frontends/chombo/api.py
@@ -34,7 +34,6 @@
       ChomboStaticOutput
 
 from .fields import \
-      ChomboFieldContainer, \
       ChomboFieldInfo, \
       add_chombo_field
 


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -55,7 +55,9 @@
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
      parallel_root_only
 
-from .fields import ChomboFieldContainer
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc
+from .fields import ChomboFieldInfo, KnownChomboFields
 
 class ChomboGrid(AMRGridPatch):
     _id_offset = 0
@@ -92,7 +94,6 @@
         self.domain_left_edge = pf.domain_left_edge # need these to determine absolute grid locations
         self.domain_right_edge = pf.domain_right_edge # need these to determine absolute grid locations
         self.data_style = data_style
-        self.field_info = ChomboFieldContainer()
         self.field_indexes = {}
         self.parameter_file = weakref.proxy(pf)
         # for now, the hierarchy file is the parameter file!
@@ -162,9 +163,6 @@
                 g1.Parent.append(g)
         self.max_level = self.grid_levels.max()
 
-    def _setup_unknown_fields(self):
-        pass
-
     def _setup_derived_fields(self):
         self.derived_field_list = []
 
@@ -176,7 +174,8 @@
 
 class ChomboStaticOutput(StaticOutput):
     _hierarchy_class = ChomboHierarchy
-    _fieldinfo_class = ChomboFieldContainer
+    _fieldinfo_fallback = ChomboFieldInfo
+    _fieldinfo_known = KnownChomboFields
     
     def __init__(self, filename, data_style='chombo_hdf5',
                  storage_filename = None, ini_filename = None):
@@ -185,7 +184,6 @@
         self.ini_filename = ini_filename
         StaticOutput.__init__(self,filename,data_style)
         self.storage_filename = storage_filename
-        self.field_info = self._fieldinfo_class()
         
     def _set_units(self):
         """


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -24,7 +24,9 @@
 """
 
 from yt.data_objects.field_info_container import \
-    CodeFieldInfoContainer, \
+    FieldInfoContainer, \
+    FieldInfo, \
+    NullFunc, \
     ValidateParameter, \
     ValidateDataField, \
     ValidateProperty, \
@@ -32,47 +34,48 @@
     ValidateGridType
 import yt.data_objects.universal_fields
 
-class ChomboFieldContainer(CodeFieldInfoContainer):
-    _shared_state = {}
-    _field_list = {}
-ChomboFieldInfo = ChomboFieldContainer()
+KnownChomboFields = FieldInfoContainer()
+add_chombo_field = KnownChomboFields.add_field
+
+ChomboFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
 add_chombo_field = ChomboFieldInfo.add_field
 
 add_field = add_chombo_field
 
-add_field("density", function=lambda a,b: None, take_log=True,
-          validators=[ValidateDataField("density")],
-          units=r"\rm{g} / \rm{cm}^3")
+add_field("density", function=NullFunc, take_log=True,
+          validators = [ValidateDataField("density")],
+          units=r"\rm{g}/\rm{cm}^3")
+
 ChomboFieldInfo["density"]._projected_units =r"\rm{g}/\rm{cm}^2"
 
-add_field("X-momentum", function=lambda a,b: None, take_log=False,
-          validators=[ValidateDataField("X-Momentum")],
-          units=r"", display_name=r"x momentum")
+add_field("X-momentum", function=NullFunc, take_log=False,
+          validators = [ValidateDataField("X-Momentum")],
+          units=r"",display_name=r"B_x")
 ChomboFieldInfo["X-momentum"]._projected_units=r""
 
-add_field("Y-momentum", function=lambda a,b: None, take_log=False,
-          validators=[ValidateDataField("Y-Momentum")],
-          units=r"", display_name=r"y momentum")
+add_field("Y-momentum", function=NullFunc, take_log=False,
+          validators = [ValidateDataField("Y-Momentum")],
+          units=r"",display_name=r"B_y")
 ChomboFieldInfo["Y-momentum"]._projected_units=r""
 
-add_field("Z-momentum", function=lambda a,b: None, take_log=False,
-          validators=[ValidateDataField("Z-Momentum")],
-          units=r"", display_name=r"z momentum")
+add_field("Z-momentum", function=NullFunc, take_log=False,
+          validators = [ValidateDataField("Z-Momentum")],
+          units=r"",display_name=r"B_z")
 ChomboFieldInfo["Z-momentum"]._projected_units=r""
 
-add_field("X-magnfield", function=lambda a,b: None, take_log=False,
-          validators=[ValidateDataField("X-Magnfield")],
-          units=r"", display_name=r"B_x")
+add_field("X-magnfield", function=NullFunc, take_log=False,
+          validators = [ValidateDataField("X-Magnfield")],
+          units=r"",display_name=r"B_x")
 ChomboFieldInfo["X-magnfield"]._projected_units=r""
 
-add_field("Y-magnfield", function=lambda a,b: None, take_log=False,
-          validators=[ValidateDataField("Y-Magnfield")],
-          units=r"", display_name=r"B_y")
+add_field("Y-magnfield", function=NullFunc, take_log=False,
+          validators = [ValidateDataField("Y-Magnfield")],
+          units=r"",display_name=r"B_y")
 ChomboFieldInfo["Y-magnfield"]._projected_units=r""
 
-add_field("Z-magnfield", function=lambda a,b: None, take_log=False,
-          validators=[ValidateDataField("Z-Magnfield")],
-          units=r"", display_name=r"B_z")
+add_field("Z-magnfield", function=NullFunc, take_log=False,
+          validators = [ValidateDataField("Z-Magnfield")],
+          units=r"",display_name=r"B_z")
 ChomboFieldInfo["Z-magnfield"]._projected_units=r""
 
 def _MagneticEnergy(field,data):


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/frontends/enzo/api.py
--- a/yt/frontends/enzo/api.py
+++ b/yt/frontends/enzo/api.py
@@ -39,8 +39,9 @@
       EnzoStaticOutputInMemory
 
 from .fields import \
-      EnzoFieldContainer, \
       EnzoFieldInfo, \
+      Enzo2DFieldInfo, \
+      Enzo1DFieldInfo, \
       add_enzo_field, \
       add_enzo_1d_field, \
       add_enzo_2d_field


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -45,13 +45,17 @@
     AMRHierarchy
 from yt.data_objects.static_output import \
     StaticOutput
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc
 from yt.utilities.definitions import mpc_conversion
 from yt.utilities import hdf5_light_reader
 from yt.utilities.logger import ytLogger as mylog
 
 from .definitions import parameterDict
-from .fields import EnzoFieldContainer, Enzo1DFieldContainer, \
-    Enzo2DFieldContainer, add_enzo_field
+from .fields import \
+    EnzoFieldInfo, Enzo2DFieldInfo, Enzo1DFieldInfo, \
+    add_enzo_field, add_enzo_2d_field, add_enzo_1d_field, \
+    KnownEnzoFields
 
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_blocking_call
@@ -127,6 +131,56 @@
     def set_filename(self, filename):
         pass
 
+class EnzoGridGZ(EnzoGrid):
+
+    __slots__ = ()
+
+    def retrieve_ghost_zones(self, n_zones, fields, all_levels=False,
+                             smoothed=False):
+        # We ignore smoothed in this case.
+        if n_zones > 3:
+            return EnzoGrid.retrieve_ghost_zones(
+                self, n_zones, fields, all_levels, smoothed)
+        # ----- Below is mostly the original code, except we remove the field
+        # ----- access section
+        # We will attempt this by creating a datacube that is exactly bigger
+        # than the grid by nZones*dx in each direction
+        nl = self.get_global_startindex() - n_zones
+        nr = nl + self.ActiveDimensions + 2*n_zones
+        new_left_edge = nl * self.dds + self.pf.domain_left_edge
+        new_right_edge = nr * self.dds + self.pf.domain_left_edge
+        # Something different needs to be done for the root grid, though
+        level = self.Level
+        args = (level, new_left_edge, new_right_edge)
+        kwargs = {'dims': self.ActiveDimensions + 2*n_zones,
+                  'num_ghost_zones':n_zones,
+                  'use_pbar':False}
+        # This should update the arguments to set the field parameters to be
+        # those of this grid.
+        kwargs.update(self.field_parameters)
+        if smoothed:
+            #cube = self.hierarchy.smoothed_covering_grid(
+            #    level, new_left_edge, new_right_edge, **kwargs)
+            cube = self.hierarchy.smoothed_covering_grid(
+                level, new_left_edge, **kwargs)
+        else:
+            cube = self.hierarchy.covering_grid(
+                level, new_left_edge, **kwargs)
+        # ----- This is EnzoGrid.get_data, duplicated here mostly for
+        # ----- efficiency's sake.
+        sl = [slice(3 - n_zones, -(3 - n_zones) or None) for i in range(3)]
+        if fields is None: return cube
+        for field in ensure_list(fields):
+            if field in self.hierarchy.field_list:
+                conv_factor = 1.0
+                if self.pf.field_info.has_key(field):
+                    conv_factor = self.pf.field_info[field]._convert_function(self)
+                if self.pf.field_info[field].particle_type: continue
+                temp = self.hierarchy.io._read_raw_data_set(self, field)
+                temp = temp.swapaxes(0, 2)
+                cube.field_data[field] = na.multiply(temp, conv_factor, temp)[sl]
+        return cube
+
 class EnzoHierarchy(AMRHierarchy):
 
     _strip_path = False
@@ -205,7 +259,11 @@
                 list_of_sets = []
             if len(list_of_sets) == 0 and rank == 3:
                 mylog.debug("Detected packed HDF5")
-                self.data_style = 'enzo_packed_3d'
+                if self.parameters.get("WriteGhostZones", 0) == 1:
+                    self.data_style= "enzo_packed_3d_gz"
+                    self.grid = EnzoGridGZ
+                else:
+                    self.data_style = 'enzo_packed_3d'
             elif len(list_of_sets) > 0 and rank == 3:
                 mylog.debug("Detected unpacked HDF5")
                 self.data_style = 'enzo_hdf5'
@@ -254,7 +312,9 @@
                     self.__pointer_handler(vv)
         pbar.finish()
         self._fill_arrays(ei, si, LE, RE, np)
-        self.grids = na.array(self.grids, dtype='object')
+        temp_grids = na.empty(self.num_grids, dtype='object')
+        temp_grids[:] = self.grids
+        self.grids = temp_grids
         self.filenames = fn
         self._store_binary_hierarchy()
         t2 = time.time()
@@ -406,25 +466,6 @@
         self.save_data(list(field_list),"/","DataFields",passthrough=True)
         self.field_list = list(field_list)
 
-    def _setup_unknown_fields(self):
-        for field in self.field_list:
-            if field in self.parameter_file.field_info: continue
-            mylog.info("Adding %s to list of fields", field)
-            cf = None
-            if self.parameter_file.has_key(field):
-                def external_wrapper(f):
-                    def _convert_function(data):
-                        return data.convert(f)
-                    return _convert_function
-                cf = external_wrapper(field)
-            # Note that we call add_field on the field_info directly.  This
-            # will allow the same field detection mechanism to work for 1D, 2D
-            # and 3D fields.
-            self.pf.field_info.add_field(
-                    field, lambda a, b: None,
-                    convert_function=cf, take_log=False)
-            
-
     def _setup_derived_fields(self):
         self.derived_field_list = []
         for field in self.parameter_file.field_info:
@@ -631,7 +672,8 @@
     Enzo-specific output, set at a fixed time.
     """
     _hierarchy_class = EnzoHierarchy
-    _fieldinfo_class = EnzoFieldContainer
+    _fieldinfo_fallback = EnzoFieldInfo
+    _fieldinfo_known = KnownEnzoFields
     def __init__(self, filename, data_style=None,
                  file_style = None,
                  parameter_override = None,
@@ -674,11 +716,9 @@
         if self["TopGridRank"] == 1: self._setup_1d()
         elif self["TopGridRank"] == 2: self._setup_2d()
 
-        self.field_info = self._fieldinfo_class()
-
     def _setup_1d(self):
         self._hierarchy_class = EnzoHierarchy1D
-        self._fieldinfo_class = Enzo1DFieldContainer
+        self._fieldinfo_fallback = Enzo1DFieldInfo
         self.domain_left_edge = \
             na.concatenate([[self.domain_left_edge], [0.0, 0.0]])
         self.domain_right_edge = \
@@ -686,7 +726,7 @@
 
     def _setup_2d(self):
         self._hierarchy_class = EnzoHierarchy2D
-        self._fieldinfo_class = Enzo2DFieldContainer
+        self._fieldinfo_fallback = Enzo2DFieldInfo
         self.domain_left_edge = \
             na.concatenate([self["DomainLeftEdge"], [0.0]])
         self.domain_right_edge = \
@@ -938,8 +978,6 @@
 
         StaticOutput.__init__(self, "InMemoryParameterFile", self._data_style)
 
-        self.field_info = self._fieldinfo_class()
-
     def _parse_parameter_file(self):
         enzo = self._obtain_enzo()
         self.basename = "cycle%08i" % (


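One small but easy-to-miss change above: the hierarchy now builds its grids array with na.empty(..., dtype='object') and fills it, rather than calling na.array(self.grids, dtype='object'). The difference matters because NumPy will happily descend into sequence-like elements and build a multidimensional array. A standalone illustration of that pitfall, with plain arrays standing in for grid objects:

    import numpy as np

    grids = [np.zeros(4), np.zeros(4)]        # stand-ins for sequence-like grid objects

    a = np.array(grids, dtype='object')       # NumPy descends into the elements...
    print(a.shape)                            # (2, 4) -- not a flat list of grids

    b = np.empty(len(grids), dtype='object')  # pre-sized 1-D object container
    for i, g in enumerate(grids):
        b[i] = g                              # each slot holds one grid object
    print(b.shape)                            # (2,)
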
diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -26,7 +26,10 @@
 import numpy as na
 
 from yt.data_objects.field_info_container import \
-    CodeFieldInfoContainer, \
+    FieldInfoContainer, \
+    NullFunc, \
+    TranslationFunc, \
+    FieldInfo, \
     ValidateParameter, \
     ValidateDataField, \
     ValidateProperty, \
@@ -35,42 +38,43 @@
 import yt.data_objects.universal_fields
 from yt.utilities.physical_constants import \
     mh
+from yt.funcs import *
+
 import yt.utilities.amr_utils as amr_utils
 
-class EnzoFieldContainer(CodeFieldInfoContainer):
-    """
-    This is a container for Enzo-specific fields.
-    """
-    _shared_state = {}
-    _field_list = {}
-EnzoFieldInfo = EnzoFieldContainer()
-add_enzo_field = EnzoFieldInfo.add_field
+EnzoFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = EnzoFieldInfo.add_field
 
-add_field = add_enzo_field
+KnownEnzoFields = FieldInfoContainer()
+add_enzo_field = KnownEnzoFields.add_field
 
-_speciesList = ["HI","HII","Electron",
-               "HeI","HeII","HeIII",
-               "H2I","H2II","HM",
-               "DI","DII","HDI","Metal","PreShock"]
-_speciesMass = {"HI":1.0,"HII":1.0,"Electron":1.0,
-                "HeI":4.0,"HeII":4.0,"HeIII":4.0,
-                "H2I":2.0,"H2II":2.0,"HM":1.0,
-                "DI":2.0,"DII":2.0,"HDI":3.0}
+_speciesList = ["HI", "HII", "Electron",
+                "HeI", "HeII", "HeIII",
+                "H2I", "H2II", "HM",
+                "DI", "DII", "HDI", "Metal", "PreShock"]
+_speciesMass = {"HI": 1.0, "HII": 1.0, "Electron": 1.0,
+                "HeI": 4.0, "HeII": 4.0, "HeIII": 4.0,
+                "H2I": 2.0, "H2II": 2.0, "HM": 1.0,
+                "DI": 2.0, "DII": 2.0, "HDI": 3.0}
 
 def _SpeciesComovingDensity(field, data):
     sp = field.name.split("_")[0] + "_Density"
     ef = (1.0 + data.pf.current_redshift)**3.0
-    return data[sp]/ef
+    return data[sp] / ef
+
 def _SpeciesFraction(field, data):
     sp = field.name.split("_")[0] + "_Density"
-    return data[sp]/data["Density"]
+    return data[sp] / data["Density"]
+
 def _SpeciesMass(field, data):
     sp = field.name.split("_")[0] + "_Density"
     return data[sp] * data["CellVolume"]
+
 def _SpeciesNumberDensity(field, data):
     species = field.name.split("_")[0]
     sp = field.name.split("_")[0] + "_Density"
-    return data[sp]/_speciesMass[species]
+    return data[sp] / _speciesMass[species]
+
 def _convertCellMassMsun(data):
     return 5.027854e-34 # g^-1
 def _ConvertNumberDensity(data):
@@ -118,10 +122,10 @@
           validators=ValidateDataField("SN_Colour"),
           projection_conversion="1")
 
-add_field("Cooling_Time", units=r"\rm{s}",
-          function=lambda a, b: None,
-          validators=ValidateDataField("Cooling_Time"),
-          projection_conversion="1")
+add_enzo_field("Cooling_Time", units=r"\rm{s}",
+               function=NullFunc,
+               validators=ValidateDataField("Cooling_Time"),
+               projection_conversion="1")
 
 def _ThermalEnergy(field, data):
     if data.pf["HydroMethod"] == 2:
@@ -154,7 +158,9 @@
 def _convertEnergy(data):
     return data.convert("x-velocity")**2.0
 
-add_field("GasEnergy", function=lambda a, b: None,
+add_enzo_field("GasEnergy", function=NullFunc,
+          units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
+add_enzo_field("Gas_Energy", function=NullFunc,
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 
 def _Gas_Energy(field, data):
@@ -162,7 +168,12 @@
 add_field("Gas_Energy", function=_Gas_Energy,
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 
-add_field("TotalEnergy", function=lambda a, b: None,
+# We set up fields for both TotalEnergy and Total_Energy in the known-fields
+# list.  Note that this does not mean these will be the definitions actually used.
+add_enzo_field("TotalEnergy", function=NullFunc,
+          display_name = "\mathrm{Total}\/\mathrm{Energy}",
+          units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
+add_enzo_field("Total_Energy", function=NullFunc,
           display_name = "\mathrm{Total}\/\mathrm{Energy}",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 
@@ -221,38 +232,46 @@
 
 for field in _default_fields:
     dn = field.replace("_","\/")
-    add_field(field, function=lambda a, b: None, take_log=True,
+    add_enzo_field(field, function=NullFunc, take_log=True,
               display_name = dn,
-              validators=[ValidateDataField(field)], units=r"\rm{g}/\rm{cm}^3")
-EnzoFieldInfo["x-velocity"].projection_conversion='1'
-EnzoFieldInfo["y-velocity"].projection_conversion='1'
-EnzoFieldInfo["z-velocity"].projection_conversion='1'
+              validators=[ValidateDataField(field)], units=r"Unknown")
+KnownEnzoFields["x-velocity"].projection_conversion='1'
+KnownEnzoFields["y-velocity"].projection_conversion='1'
+KnownEnzoFields["z-velocity"].projection_conversion='1'
+
+def _convertBfield(data): 
+    return na.sqrt(4*na.pi*data.convert("Density")*data.convert("x-velocity")**2)
+for field in ['Bx','By','Bz']:
+    f = KnownEnzoFields[field]
+    f._convert_function=_convertBfield
+    f._units=r"\mathrm{Gau\ss}"
+    f.take_log=False
 
 # Now we override
 
 def _convertDensity(data):
     return data.convert("Density")
 for field in ["Density"] + [ "%s_Density" % sp for sp in _speciesList ]:
-    EnzoFieldInfo[field]._units = r"\rm{g}/\rm{cm}^3"
-    EnzoFieldInfo[field]._projected_units = r"\rm{g}/\rm{cm}^2"
-    EnzoFieldInfo[field]._convert_function=_convertDensity
+    KnownEnzoFields[field]._units = r"\rm{g}/\rm{cm}^3"
+    KnownEnzoFields[field]._projected_units = r"\rm{g}/\rm{cm}^2"
+    KnownEnzoFields[field]._convert_function=_convertDensity
 
-add_field("Dark_Matter_Density", function=lambda a,b: None,
+add_enzo_field("Dark_Matter_Density", function=NullFunc,
           convert_function=_convertDensity,
           validators=[ValidateDataField("Dark_Matter_Density"),
                       ValidateSpatial(0)],
           display_name = "Dark\ Matter\ Density",
           not_in_all = True)
 
-EnzoFieldInfo["Temperature"]._units = r"\rm{K}"
-EnzoFieldInfo["Temperature"].units = r"K"
-EnzoFieldInfo["Dust_Temperature"]._units = r"\rm{K}"
-EnzoFieldInfo["Dust_Temperature"].units = r"K"
+KnownEnzoFields["Temperature"]._units = r"\rm{K}"
+KnownEnzoFields["Temperature"].units = r"K"
+KnownEnzoFields["Dust_Temperature"]._units = r"\rm{K}"
+KnownEnzoFields["Dust_Temperature"].units = r"K"
 
 def _convertVelocity(data):
     return data.convert("x-velocity")
 for ax in ['x','y','z']:
-    f = EnzoFieldInfo["%s-velocity" % ax]
+    f = KnownEnzoFields["%s-velocity" % ax]
     f._units = r"\rm{cm}/\rm{s}"
     f._convert_function = _convertVelocity
     f.take_log = False
@@ -378,7 +397,7 @@
 def _convertBfield(data): 
     return na.sqrt(4*na.pi*data.convert("Density")*data.convert("x-velocity")**2)
 for field in ['Bx','By','Bz']:
-    f = EnzoFieldInfo[field]
+    f = KnownEnzoFields[field]
     f._convert_function=_convertBfield
     f._units=r"\mathrm{Gauss}"
     f.take_log=False
@@ -390,17 +409,95 @@
 
 add_field("Bmag", function=_Bmag,display_name=r"|B|",units=r"\mathrm{Gauss}")
 
+# Particle functions
+
+def particle_func(p_field, dtype='float64'):
+    def _Particles(field, data):
+        io = data.hierarchy.io
+        if not data.NumberOfParticles > 0:
+            return na.array([], dtype=dtype)
+        try:
+            return io._read_data_set(data, p_field).astype(dtype)
+        except io._read_exception:
+            pass
+        # This is bad.  But it's the best idea I have right now.
+        return data._read_data(p_field.replace("_"," ")).astype(dtype)
+    return _Particles
+for pf in ["type", "mass"] + \
+          ["position_%s" % ax for ax in 'xyz']:
+    pfunc = particle_func("particle_%s" % (pf))
+    add_enzo_field("particle_%s" % pf, function=pfunc,
+              validators = [ValidateSpatial(0)],
+              particle_type=True)
     
+def _convRetainInt(data):
+    return 1
+add_enzo_field("particle_index", function=particle_func("particle_index", "int64"),
+          validators = [ValidateSpatial(0)], particle_type=True,
+          convert_function=_convRetainInt)
+
+def _get_vel_convert(ax):
+    def _convert_p_vel(data):
+        return data.convert("%s-velocity" % ax)
+    return _convert_p_vel
+for ax in 'xyz':
+    pf = "particle_velocity_%s" % ax
+    pfunc = particle_func(pf)
+    cfunc = _get_vel_convert(ax)
+    add_enzo_field(pf, function=pfunc, convert_function=cfunc,
+              validators = [ValidateSpatial(0)],
+              particle_type=True)
+
+for pf in ["creation_time", "dynamical_time", "metallicity_fraction"]:
+    pfunc = particle_func(pf)
+    add_enzo_field(pf, function=pfunc,
+              validators = [ValidateSpatial(0),
+                            ValidateDataField(pf)],
+              particle_type=True)
+add_field("particle_mass", function=particle_func("particle_mass"),
+          validators=[ValidateSpatial(0)], particle_type=True)
+
+def _ParticleAge(field, data):
+    current_time = data.pf.current_time
+    return (current_time - data["creation_time"])
+def _convertParticleAge(data):
+    return data.convert("years")
+add_field("ParticleAge", function=_ParticleAge,
+          validators=[ValidateDataField("creation_time")],
+          particle_type=True, convert_function=_convertParticleAge)
+
+def _ParticleMass(field, data):
+    particles = data["particle_mass"].astype('float64') * \
+                just_one(data["CellVolumeCode"].ravel())
+    # Note that we mandate grid-type here, so this is okay
+    return particles
+
+def _convertParticleMass(data):
+    return data.convert("Density")*(data.convert("cm")**3.0)
+def _IOLevelParticleMass(grid):
+    dd = dict(particle_mass = na.ones(1), CellVolumeCode=grid["CellVolumeCode"])
+    cf = (_ParticleMass(None, dd) * _convertParticleMass(grid))[0]
+    return cf
+def _convertParticleMassMsun(data):
+    return data.convert("Density")*((data.convert("cm")**3.0)/1.989e33)
+def _IOLevelParticleMassMsun(grid):
+    dd = dict(particle_mass = na.ones(1), CellVolumeCode=grid["CellVolumeCode"])
+    cf = (_ParticleMass(None, dd) * _convertParticleMassMsun(grid))[0]
+    return cf
+add_field("ParticleMass",
+          function=_ParticleMass, validators=[ValidateSpatial(0)],
+          particle_type=True, convert_function=_convertParticleMass,
+          particle_convert_function=_IOLevelParticleMass)
+add_field("ParticleMassMsun",
+          function=_ParticleMass, validators=[ValidateSpatial(0)],
+          particle_type=True, convert_function=_convertParticleMassMsun,
+          particle_convert_function=_IOLevelParticleMassMsun)
+
 #
 # Now we do overrides for 2D fields
 #
 
-class Enzo2DFieldContainer(CodeFieldInfoContainer):
-    _shared_state = {}
-    _field_list = EnzoFieldContainer._field_list.copy()
-# We make a copy of the dict from the other, so we
-# can now update it...
-Enzo2DFieldInfo = Enzo2DFieldContainer()
+Enzo2DFieldInfo = FieldInfoContainer.create_with_fallback(EnzoFieldInfo)
 add_enzo_2d_field = Enzo2DFieldInfo.add_field
 
 def _CellArea(field, data):
@@ -438,12 +535,7 @@
 # Now we do overrides for 1D fields
 #
 
-class Enzo1DFieldContainer(CodeFieldInfoContainer):
-    _shared_state = {}
-    _field_list = EnzoFieldContainer._field_list.copy()
-# We make a copy of the dict from the other, so we
-# can now update it...
-Enzo1DFieldInfo = Enzo1DFieldContainer()
+Enzo1DFieldInfo = FieldInfoContainer.create_with_fallback(EnzoFieldInfo)
 add_enzo_1d_field = Enzo1DFieldInfo.add_field
 
 def _CellLength(field, data):
@@ -474,7 +566,7 @@
 def _convertBfield(data): 
     return na.sqrt(4*na.pi*data.convert("Density")*data.convert("x-velocity")**2)
 for field in ['Bx','By','Bz']:
-    f = EnzoFieldInfo[field]
+    f = KnownEnzoFields[field]
     f._convert_function=_convertBfield
     f._units=r"\mathrm{Gauss}"
     f.take_log=False


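Throughout these frontend changes each code now carries two containers: a Known*Fields container for fields exactly as they appear on disk, and a *FieldInfo container created with create_with_fallback, which defers to the universal FieldInfo for anything it does not define itself. The effect is a layered lookup. A rough, standalone sketch of that lookup order, using a plain ChainMap as a stand-in rather than yt's FieldInfoContainer:

    from collections import ChainMap           # illustration only; not used by yt itself

    universal = {"Entropy": "universal definition"}
    enzo_specific = {"Cooling_Time": "Enzo-specific definition"}

    # Ask the Enzo layer first, fall back to the universal layer otherwise.
    enzo_field_info = ChainMap(enzo_specific, universal)

    print(enzo_field_info["Cooling_Time"])      # resolved in the Enzo layer
    print(enzo_field_info["Entropy"])           # falls back to the universal layer
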
diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -181,8 +181,8 @@
         mylog.debug("Finished read of %s", sets)
 
     def _read_data_set(self, grid, field):
-        return hdf5_light_reader.ReadData(grid.filename,
-                "/Grid%08i/%s" % (grid.id, field)).swapaxes(0,2)
+        return self.modify(hdf5_light_reader.ReadData(grid.filename,
+                "/Grid%08i/%s" % (grid.id, field)))
 
     def _read_data_slice(self, grid, field, axis, coord):
         axis = _axis_ids[axis]
@@ -197,6 +197,22 @@
     def _read_exception(self):
         return (exceptions.KeyError, hdf5_light_reader.ReadingError)
 
+class IOHandlerPackedHDF5GhostZones(IOHandlerPackedHDF5):
+    _data_style = "enzo_packed_3d_gz"
+
+    def modify(self, field):
+        tr = field[3:-3,3:-3,3:-3].swapaxes(0,2)
+        return tr.copy() # To ensure contiguous
+
+    def _read_data_slice(self, grid, field, axis, coord):
+        axis = _axis_ids[axis]
+        return hdf5_light_reader.ReadDataSlice(grid.filename, "/Grid%08i/%s" %
+                        (grid.id, field), axis, coord)[3:-3,3:-3].transpose()
+
+    def _read_raw_data_set(self, grid, field):
+        return hdf5_light_reader.ReadData(grid.filename,
+                "/Grid%08i/%s" % (grid.id, field))
+
 class IOHandlerInMemory(BaseIOHandler):
 
     _data_style = "enzo_inline"


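The new ghost-zone IO handler reads the full padded block and then strips a fixed three-cell layer from every face before swapping the first and last axes, exactly as in modify() above. On a toy array the slicing looks like this:

    import numpy as np

    raw = np.arange(10 * 12 * 14).reshape(10, 12, 14)    # block padded by 3 ghost cells per face

    active = raw[3:-3, 3:-3, 3:-3].swapaxes(0, 2).copy() # drop ghosts, swap axes, force a contiguous copy
    print(active.shape)                                  # (8, 6, 4)
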
diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/frontends/flash/api.py
--- a/yt/frontends/flash/api.py
+++ b/yt/frontends/flash/api.py
@@ -34,7 +34,6 @@
       FLASHStaticOutput
 
 from .fields import \
-      FLASHFieldContainer, \
       FLASHFieldInfo, \
       add_flash_field
 


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -40,9 +40,8 @@
 from yt.utilities.io_handler import \
     io_registry
 
-from .fields import \
-    FLASHFieldContainer, \
-    add_field
+from .fields import FLASHFieldInfo, add_flash_field, KnownFLASHFields
+from yt.data_objects.field_info_container import FieldInfoContainer, NullFunc
 
 class FLASHGrid(AMRGridPatch):
     _id_offset = 1
@@ -60,24 +59,19 @@
 class FLASHHierarchy(AMRHierarchy):
 
     grid = FLASHGrid
-    _handle = None
     
-    def __init__(self,pf,data_style='chombo_hdf5'):
+    def __init__(self,pf,data_style='flash_hdf5'):
         self.data_style = data_style
-        self.field_info = FLASHFieldContainer()
         self.field_indexes = {}
         self.parameter_file = weakref.proxy(pf)
         # for now, the hierarchy file is the parameter file!
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.directory = os.path.dirname(self.hierarchy_filename)
-        self._handle = h5py.File(self.hierarchy_filename)
+        self._handle = pf._handle
 
         self.float_type = na.float64
         AMRHierarchy.__init__(self,pf,data_style)
 
-        self._handle.close()
-        self._handle = None
-
     def _initialize_data_storage(self):
         pass
 
@@ -102,7 +96,7 @@
     def _count_grids(self):
         try:
             self.num_grids = self.parameter_file._find_parameter(
-                "integer", "globalnumblocks", True, self._handle)
+                "integer", "globalnumblocks", True)
         except KeyError:
             self.num_grids = self._handle["/simulation parameters"][0][0]
         
@@ -114,9 +108,9 @@
         self.grid_right_edge[:] = f["/bounding box"][:,:,1]
         # Move this to the parameter file
         try:
-            nxb = pf._find_parameter("integer", "nxb", True, f)
-            nyb = pf._find_parameter("integer", "nyb", True, f)
-            nzb = pf._find_parameter("integer", "nzb", True, f)
+            nxb = pf._find_parameter("integer", "nxb", True)
+            nyb = pf._find_parameter("integer", "nyb", True)
+            nzb = pf._find_parameter("integer", "nzb", True)
         except KeyError:
             nxb, nyb, nzb = [int(f["/simulation parameters"]['n%sb' % ax])
                               for ax in 'xyz']
@@ -152,22 +146,6 @@
             g._setup_dx()
         self.max_level = self.grid_levels.max()
 
-    def _setup_unknown_fields(self):
-        for field in self.field_list:
-            if field in self.parameter_file.field_info: continue
-            pfield = field.startswith("particle_")
-            mylog.info("Adding %s to list of fields", field)
-            cf = None
-            if self.parameter_file.has_key(field):
-                def external_wrapper(f):
-                    def _convert_function(data):
-                        return data.convert(f)
-                    return _convert_function
-                cf = external_wrapper(field)
-            add_field(field, lambda a, b: None,
-                      convert_function=cf, take_log=False,
-                      particle_type=pfield)
-
     def _setup_derived_fields(self):
         self.derived_field_list = []
         for field in self.parameter_file.field_info:
@@ -187,20 +165,21 @@
 
 class FLASHStaticOutput(StaticOutput):
     _hierarchy_class = FLASHHierarchy
-    _fieldinfo_class = FLASHFieldContainer
+    _fieldinfo_fallback = FLASHFieldInfo
+    _fieldinfo_known = KnownFLASHFields
     _handle = None
     
     def __init__(self, filename, data_style='flash_hdf5',
                  storage_filename = None,
                  conversion_override = None):
 
+        self._handle = h5py.File(filename, "r")
         if conversion_override is None: conversion_override = {}
         self._conversion_override = conversion_override
 
         StaticOutput.__init__(self, filename, data_style)
         self.storage_filename = storage_filename
 
-        self.field_info = self._fieldinfo_class()
         # These should be explicitly obtained from the file, but for now that
         # will wait until a reorganization of the source tree and better
         # generalization.
@@ -273,26 +252,17 @@
         for unit in mpc_conversion.keys():
             self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
 
-    def _find_parameter(self, ptype, pname, scalar = False, handle = None):
-        # We're going to implement handle caching eventually
-        if handle is None:
-            close = False
-            handle = self._handle
-        if handle is None:
-            close = True
-            handle = h5py.File(self.parameter_filename, "r")
+    def _find_parameter(self, ptype, pname, scalar = False):
         nn = "/%s %s" % (ptype,
                 {False: "runtime parameters", True: "scalars"}[scalar])
-        for tpname, pval in handle[nn][:]:
+        for tpname, pval in self._handle[nn][:]:
             if tpname.strip() == pname:
                 return pval
-        if close: handle.close()
         raise KeyError(pname)
 
     def _parse_parameter_file(self):
         self.unique_identifier = \
             int(os.stat(self.parameter_filename)[stat.ST_CTIME])
-        self._handle = h5py.File(self.parameter_filename, "r")
         if "file format version" in self._handle:
             self._flash_version = int(
                 self._handle["file format version"][:])
@@ -308,15 +278,15 @@
 
         # Determine domain dimensions
         try:
-            nxb = self._find_parameter("integer", "nxb", scalar = True, handle = self._handle)
-            nyb = self._find_parameter("integer", "nyb", scalar = True, handle = self._handle)
-            nzb = self._find_parameter("integer", "nzb", scalar = True, handle = self._handle)
+            nxb = self._find_parameter("integer", "nxb", scalar = True)
+            nyb = self._find_parameter("integer", "nyb", scalar = True)
+            nzb = self._find_parameter("integer", "nzb", scalar = True)
         except KeyError:
             nxb, nyb, nzb = [int(self._handle["/simulation parameters"]['n%sb' % ax])
                               for ax in 'xyz']
-        nblockx = self._find_parameter("integer", "nblockx", handle = self._handle)
-        nblocky = self._find_parameter("integer", "nblockx", handle = self._handle)
-        nblockz = self._find_parameter("integer", "nblockx", handle = self._handle)
+        nblockx = self._find_parameter("integer", "nblockx")
+        nblocky = self._find_parameter("integer", "nblocky")
+        nblockz = self._find_parameter("integer", "nblockz")
         self.domain_dimensions = \
             na.array([nblockx*nxb,nblocky*nyb,nblockz*nzb])
 
@@ -342,14 +312,16 @@
         else:
             self.current_redshift = self.omega_lambda = self.omega_matter = \
                 self.hubble_constant = self.cosmological_simulation = 0.0
+
+    def __del__(self):
         self._handle.close()
-        self._handle = None
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
         try:
             fileh = h5py.File(args[0],'r')
             if "bounding box" in fileh["/"].keys():
+                fileh.close()
                 return True
         except:
             pass


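With the HDF5 handle now cached on the parameter file, _find_parameter reduces to scanning a two-column (name, value) table such as "/integer runtime parameters" or "/integer scalars". A minimal sketch of that scan; the filename and parameter name below are placeholders, and names may come back as byte strings under Python 3:

    import h5py

    def find_parameter(handle, ptype, pname, scalar=False):
        group = "/%s %s" % (ptype, "scalars" if scalar else "runtime parameters")
        for name, value in handle[group][:]:
            key = name.strip()
            if isinstance(key, bytes):
                key = key.decode()              # h5py may hand back byte strings
            if key == pname:
                return value
        raise KeyError(pname)

    with h5py.File("flash_plotfile.hdf5", "r") as f:    # placeholder filename
        nxb = find_parameter(f, "integer", "nxb", scalar=True)
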
diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -24,7 +24,8 @@
 """
 
 from yt.data_objects.field_info_container import \
-    CodeFieldInfoContainer, \
+    FieldInfoContainer, \
+    FieldInfo, \
     ValidateParameter, \
     ValidateDataField, \
     ValidateProperty, \
@@ -32,13 +33,12 @@
     ValidateGridType
 import yt.data_objects.universal_fields
 
-class FLASHFieldContainer(CodeFieldInfoContainer):
-    _shared_state = {}
-    _field_list = {}
-FLASHFieldInfo = FLASHFieldContainer()
-add_flash_field = FLASHFieldInfo.add_field
 
-add_field = add_flash_field
+KnownFLASHFields = FieldInfoContainer()
+add_flash_field = KnownFLASHFields.add_field
+
+FLASHFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = FLASHFieldInfo.add_field
 
 # Common fields in FLASH: (Thanks to John ZuHone for this list)
 #


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -38,7 +38,7 @@
         BaseIOHandler.__init__(self, *args, **kwargs)
         # Now we cache the particle fields
         self.pf = pf
-        self._handle = h5py.File(self.pf.parameter_filename, "r")
+        self._handle = pf._handle
         try :
             particle_fields = [s[0].strip() for s in
                                self._handle["/particle names"][:]]
@@ -47,9 +47,6 @@
         except KeyError:
             self._particle_fields = {}
 
-    def __del__(self):
-        self._handle.close()
-            
     def _read_particles(self, fields_to_read, type, args, grid_list,
             count_list, conv_factors):
         pass


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/frontends/gadget/api.py
--- a/yt/frontends/gadget/api.py
+++ b/yt/frontends/gadget/api.py
@@ -34,7 +34,6 @@
       GadgetStaticOutput
 
 from .fields import \
-      GadgetFieldContainer, \
       GadgetFieldInfo, \
       add_gadget_field
 


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/frontends/gadget/data_structures.py
--- a/yt/frontends/gadget/data_structures.py
+++ b/yt/frontends/gadget/data_structures.py
@@ -37,7 +37,9 @@
 from yt.data_objects.static_output import \
     StaticOutput
 
-from .fields import GadgetFieldContainer
+from .fields import GadgetFieldInfo, KnownGadgetFields
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc
 
 class GadgetGrid(AMRGridPatch):
     _id_offset = 0
@@ -69,7 +71,6 @@
     grid = GadgetGrid
 
     def __init__(self, pf, data_style='gadget_hdf5'):
-        self.field_info = GadgetFieldContainer()
         self.filename = pf.filename
         self.directory = os.path.dirname(pf.filename)
         self.data_style = data_style
@@ -135,19 +136,16 @@
             g._prepare_grid()
             g._setup_dx()
             
-        
-    def _setup_unknown_fields(self):
-        pass
-
     def _setup_derived_fields(self):
         self.derived_field_list = []
 
 class GadgetStaticOutput(StaticOutput):
     _hierarchy_class = GadgetHierarchy
-    _fieldinfo_class = GadgetFieldContainer
+    _fieldinfo_fallback = GadgetFieldInfo
+    _fieldinfo_known = KnownGadgetFields
+
     def __init__(self, filename,storage_filename=None) :
         self.storage_filename = storage_filename
-        self.field_info = self._fieldinfo_class()
         self.filename = filename
         
         StaticOutput.__init__(self, filename, 'gadget_infrastructure')


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/frontends/gadget/fields.py
--- a/yt/frontends/gadget/fields.py
+++ b/yt/frontends/gadget/fields.py
@@ -27,7 +27,8 @@
 
 from yt.funcs import *
 from yt.data_objects.field_info_container import \
-    CodeFieldInfoContainer, \
+    FieldInfoContainer, \
+    FieldInfo, \
     ValidateParameter, \
     ValidateDataField, \
     ValidateProperty, \
@@ -35,10 +36,7 @@
     ValidateGridType
 import yt.data_objects.universal_fields
 
-class GadgetFieldContainer(CodeFieldInfoContainer):
-    _shared_state = {}
-    _field_list = {}
-GadgetFieldInfo = GadgetFieldContainer()
+GadgetFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
 add_gadget_field = GadgetFieldInfo.add_field
 
 add_field = add_gadget_field


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/frontends/gdf/api.py
--- a/yt/frontends/gdf/api.py
+++ b/yt/frontends/gdf/api.py
@@ -27,16 +27,17 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 """
-
 from .data_structures import \
-      ChomboGrid, \
-      ChomboHierarchy, \
-      ChomboStaticOutput
+      GDFGrid, \
+      GDFHierarchy, \
+      GDFStaticOutput
 
 from .fields import \
-      ChomboFieldContainer, \
-      ChomboFieldInfo, \
-      add_chombo_field
+      GDFFieldInfo, \
+      KnownGDFFields, \
+      add_gdf_field
 
 from .io import \
-      IOHandlerChomboHDF5
+      IOHandlerGDFHDF5
+
+


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -24,6 +24,9 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+import h5py
+import numpy as na
+import weakref
 from yt.funcs import *
 from yt.data_objects.grid_patch import \
            AMRGridPatch
@@ -32,7 +35,10 @@
 from yt.data_objects.static_output import \
            StaticOutput
 
-from .fields import GDFFieldContainer
+from .fields import GDFFieldInfo, KnownGDFFields
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc
+import pdb
 
 class GDFGrid(AMRGridPatch):
     _id_offset = 0
@@ -66,6 +72,7 @@
     
     def __init__(self, pf, data_style='grid_data_format'):
         self.parameter_file = weakref.proxy(pf)
+        self.data_style = data_style
         # for now, the hierarchy file is the parameter file!
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.directory = os.path.dirname(self.hierarchy_filename)
@@ -78,8 +85,7 @@
         pass
 
     def _detect_fields(self):
-        ncomp = int(self._fhandle['/'].attrs['num_components'])
-        self.field_list = [c[1] for c in self._fhandle['/'].attrs.listitems()[-ncomp:]]
+        self.field_list = self._fhandle['field_types'].keys()
     
     def _setup_classes(self):
         dd = self._get_data_reader_dict()
@@ -87,37 +93,31 @@
         self.object_types.sort()
 
     def _count_grids(self):
-        self.num_grids = 0
-        for lev in self._levels:
-            self.num_grids += self._fhandle[lev]['Processors'].len()
+        self.num_grids = self._fhandle['/grid_parent_id'].shape[0]
         
     def _parse_hierarchy(self):
-        f = self._fhandle # shortcut
+        f = self._fhandle 
         
         # this relies on the first Group in the H5 file being
         # 'Chombo_global'
         levels = f.listnames()[1:]
-        self.grids = []
-        i = 0
-        for lev in levels:
-            level_number = int(re.match('level_(\d+)',lev).groups()[0])
-            boxes = f[lev]['boxes'].value
-            dx = f[lev].attrs['dx']
-            for level_id, box in enumerate(boxes):
-                si = na.array([box['lo_%s' % ax] for ax in 'ijk'])
-                ei = na.array([box['hi_%s' % ax] for ax in 'ijk'])
-                pg = self.grid(len(self.grids),self,level=level_number,
-                               start = si, stop = ei)
-                self.grids.append(pg)
-                self.grids[-1]._level_id = level_id
-                self.grid_left_edge[i] = dx*si.astype(self.float_type)
-                self.grid_right_edge[i] = dx*(ei.astype(self.float_type) + 1)
-                self.grid_particle_count[i] = 0
-                self.grid_dimensions[i] = ei - si + 1
-                i += 1
-        temp_grids = na.empty(len(grids), dtype='object')
-        for gi, g in enumerate(self.grids): temp_grids[gi] = g
-        self.grids = temp_grids
+        dxs=[]
+        self.grids = na.empty(self.num_grids, dtype='object')
+        for i, grid in enumerate(f['data'].keys()):
+            self.grids[i] = self.grid(i, self, f['grid_level'][i],
+                                      f['grid_left_index'][i],
+                                      f['grid_dimensions'][i])
+            self.grids[i]._level_id = f['grid_level'][i]
+
+            dx = (self.parameter_file.domain_right_edge-
+                  self.parameter_file.domain_left_edge)/self.parameter_file.domain_dimensions
+            dx = dx/self.parameter_file.refine_by**(f['grid_level'][i])
+            dxs.append(dx)
+        dx = na.array(dxs)
+        self.grid_left_edge = self.parameter_file.domain_left_edge + dx*f['grid_left_index'][:]
+        self.grid_dimensions = f['grid_dimensions'][:].astype("int32")
+        self.grid_right_edge = self.grid_left_edge + dx*self.grid_dimensions
+        self.grid_particle_count = f['grid_particle_count'][:]
 
     def _populate_grid_objects(self):
         for g in self.grids:
@@ -144,16 +144,14 @@
 
 class GDFStaticOutput(StaticOutput):
     _hierarchy_class = GDFHierarchy
-    _fieldinfo_class = GDFFieldContainer
+    _fieldinfo_fallback = GDFFieldInfo
+    _fieldinfo_known = KnownGDFFields
     
     def __init__(self, filename, data_style='grid_data_format',
                  storage_filename = None):
         StaticOutput.__init__(self, filename, data_style)
-        self._handle = h5py.File(self.filename, "r")
         self.storage_filename = storage_filename
-        self.field_info = self._fieldinfo_class()
-        self._handle.close()
-        del self._handle
+        self.filename = filename
         
     def _set_units(self):
         """
@@ -165,24 +163,31 @@
             self._parse_parameter_file()
         self.time_units['1'] = 1
         self.units['1'] = 1.0
-        self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_right_edge).max()
+        self.units['cm'] = 1.0
+        self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
         seconds = 1
         self.time_units['years'] = seconds / (365*3600*24.0)
         self.time_units['days']  = seconds / (3600*24.0)
         # This should be improved.
+        self._handle = h5py.File(self.parameter_filename, "r")
         for field_name in self._handle["/field_types"]:
-            self.units[field_name] = self._handle["/%s/field_to_cgs" % field_name]
-
+            self.units[field_name] = self._handle["/field_types/%s" % field_name].attrs['field_to_cgs']
+        self._handle.close()
+        del self._handle
+        
     def _parse_parameter_file(self):
+        self._handle = h5py.File(self.parameter_filename, "r")
         sp = self._handle["/simulation_parameters"].attrs
         self.domain_left_edge = sp["domain_left_edge"][:]
         self.domain_right_edge = sp["domain_right_edge"][:]
-        self.refine_by = sp["refine_by"][:]
-        self.dimensionality = sp["dimensionality"][:]
-        self.current_time = sp["current_time"][:]
+        self.domain_dimensions = sp["domain_dimensions"][:]
+        self.refine_by = sp["refine_by"]
+        self.dimensionality = sp["dimensionality"]
+        self.current_time = sp["current_time"]
         self.unique_identifier = sp["unique_identifier"]
         self.cosmological_simulation = sp["cosmological_simulation"]
         if sp["num_ghost_zones"] != 0: raise RuntimeError
+        self.num_ghost_zones = sp["num_ghost_zones"]
         self.field_ordering = sp["field_ordering"]
         self.boundary_conditions = sp["boundary_conditions"][:]
         if self.cosmological_simulation:
@@ -193,7 +198,10 @@
         else:
             self.current_redshift = self.omega_lambda = self.omega_matter = \
                 self.hubble_constant = self.cosmological_simulation = 0.0
-        
+        self.parameters["HydroMethod"] = 0 # Hardcode for now until field staggering is supported.
+        self._handle.close()
+        del self._handle
+            
     @classmethod
     def _is_valid(self, *args, **kwargs):
         try:
@@ -204,4 +212,6 @@
             pass
         return False
 
-
+    def __repr__(self):
+        return self.basename.rsplit(".", 1)[0]
+        


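The rewritten GDF hierarchy derives grid geometry instead of reading stored box edges: the cell width at a grid's level is the root-grid cell width divided by refine_by**level, the left edge is domain_left_edge + dx * grid_left_index, and the right edge adds dx * grid_dimensions. A worked example with made-up numbers:

    import numpy as np

    domain_left_edge  = np.array([0.0, 0.0, 0.0])
    domain_right_edge = np.array([1.0, 1.0, 1.0])
    domain_dimensions = np.array([32, 32, 32])
    refine_by = 2

    grid_level      = 1                              # one level below the root grid
    grid_left_index = np.array([16, 16, 16])         # in cells at this grid's level
    grid_dimensions = np.array([8, 8, 8])

    dx = (domain_right_edge - domain_left_edge) / domain_dimensions
    dx = dx / refine_by ** grid_level                # cell width at this level

    left_edge  = domain_left_edge + dx * grid_left_index
    right_edge = left_edge + dx * grid_dimensions

    print(left_edge, right_edge)                     # [0.25 0.25 0.25] [0.375 0.375 0.375]
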
diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/frontends/gdf/fields.py
--- a/yt/frontends/gdf/fields.py
+++ b/yt/frontends/gdf/fields.py
@@ -1,5 +1,5 @@
 """
-Chombo-specific fields
+GDF-specific fields
 
 Author: J. S. Oishi <jsoishi at gmail.com>
 Affiliation: KIPAC/SLAC/Stanford
@@ -24,90 +24,74 @@
 """
 
 from yt.data_objects.field_info_container import \
-    CodeFieldInfoContainer, \
+    FieldInfoContainer, \
+    FieldInfo, \
     ValidateParameter, \
     ValidateDataField, \
     ValidateProperty, \
     ValidateSpatial, \
-    ValidateGridType
+    ValidateGridType, \
+    NullFunc, \
+    TranslationFunc
 import yt.data_objects.universal_fields
 
-class ChomboFieldContainer(CodeFieldInfoContainer):
-    _shared_state = {}
-    _field_list = {}
-ChomboFieldInfo = ChomboFieldContainer()
-add_chombo_field = ChomboFieldInfo.add_field
+log_translation_dict = {"Density": "density",
+                        "Pressure": "pressure"}
 
-add_field = add_chombo_field
+translation_dict = {"x-velocity": "velocity_x",
+                    "y-velocity": "velocity_y",
+                    "z-velocity": "velocity_z"}
+                    
+# translation_dict = {"mag_field_x": "cell_centered_B_x ",
+#                     "mag_field_y": "cell_centered_B_y ",
+#                     "mag_field_z": "cell_centered_B_z "}
 
-add_field("density", function=lambda a,b: None, take_log=True,
+GDFFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = GDFFieldInfo.add_field
+
+KnownGDFFields = FieldInfoContainer()
+add_gdf_field = KnownGDFFields.add_field
+
+add_gdf_field("density", function=NullFunc, take_log=True,
           validators = [ValidateDataField("density")],
-          units=r"\rm{g}/\rm{cm}^3")
+          units=r"\rm{g}/\rm{cm}^3",
+          projected_units =r"\rm{g}/\rm{cm}^2")
 
-ChomboFieldInfo["density"]._projected_units =r"\rm{g}/\rm{cm}^2"
+add_gdf_field("specific_energy", function=NullFunc, take_log=True,
+          validators = [ValidateDataField("specific_energy")],
+          units=r"\rm{erg}/\rm{g}")
 
-add_field("X-momentum", function=lambda a,b: None, take_log=False,
-          validators = [ValidateDataField("X-Momentum")],
-          units=r"",display_name=r"B_x")
-ChomboFieldInfo["X-momentum"]._projected_units=r""
+add_gdf_field("pressure", function=NullFunc, take_log=True,
+          validators = [ValidateDataField("pressure")],
+          units=r"\rm{erg}/\rm{g}")
 
-add_field("Y-momentum", function=lambda a,b: None, take_log=False,
-          validators = [ValidateDataField("Y-Momentum")],
-          units=r"",display_name=r"B_y")
-ChomboFieldInfo["Y-momentum"]._projected_units=r""
+add_gdf_field("velocity_x", function=NullFunc, take_log=True,
+          validators = [ValidateDataField("velocity_x")],
+          units=r"\rm{cm}/\rm{s}")
 
-add_field("Z-momentum", function=lambda a,b: None, take_log=False,
-          validators = [ValidateDataField("Z-Momentum")],
-          units=r"",display_name=r"B_z")
-ChomboFieldInfo["Z-momentum"]._projected_units=r""
+add_gdf_field("velocity_y", function=NullFunc, take_log=False,
+          validators = [ValidateDataField("velocity_y")],
+          units=r"\rm{cm}/\rm{s}")
 
-add_field("X-magnfield", function=lambda a,b: None, take_log=False,
-          validators = [ValidateDataField("X-Magnfield")],
-          units=r"",display_name=r"B_x")
-ChomboFieldInfo["X-magnfield"]._projected_units=r""
+add_gdf_field("velocity_z", function=NullFunc, take_log=False,
+          validators = [ValidateDataField("velocity_z")],
+          units=r"\rm{cm}/\rm{s}")
 
-add_field("Y-magnfield", function=lambda a,b: None, take_log=False,
-          validators = [ValidateDataField("Y-Magnfield")],
-          units=r"",display_name=r"B_y")
-ChomboFieldInfo["Y-magnfield"]._projected_units=r""
+add_gdf_field("mag_field_x", function=NullFunc, take_log=False,
+          validators = [ValidateDataField("mag_field_x")],
+          units=r"\rm{cm}/\rm{s}")
 
-add_field("Z-magnfield", function=lambda a,b: None, take_log=False,
-          validators = [ValidateDataField("Z-Magnfield")],
-          units=r"",display_name=r"B_z")
-ChomboFieldInfo["Z-magnfield"]._projected_units=r""
+add_gdf_field("mag_field_y", function=NullFunc, take_log=False,
+          validators = [ValidateDataField("mag_field_y")],
+          units=r"\rm{cm}/\rm{s}")
 
-def _MagneticEnergy(field,data):
-    return (data["X-magnfield"]**2 +
-            data["Y-magnfield"]**2 +
-            data["Z-magnfield"]**2)/2.
-add_field("MagneticEnergy", function=_MagneticEnergy, take_log=True,
-          units=r"",display_name=r"B^2/8\pi")
-ChomboFieldInfo["MagneticEnergy"]._projected_units=r""
+add_gdf_field("mag_field_z", function=NullFunc, take_log=False,
+          validators = [ValidateDataField("mag_field_z")],
+          units=r"\rm{cm}/\rm{s}")
 
-def _xVelocity(field, data):
-    """generate x-velocity from x-momentum and density
+for f,v in log_translation_dict.items():
+    add_field(f, TranslationFunc(v), take_log=True)
 
-    """
-    return data["X-momentum"]/data["density"]
-add_field("x-velocity",function=_xVelocity, take_log=False,
-          units=r'\rm{cm}/\rm{s}')
+for f,v in translation_dict.items():
+    add_field(f, TranslationFunc(v), take_log=False)
 
-def _yVelocity(field,data):
-    """generate y-velocity from y-momentum and density
-
-    """
-    #try:
-    #    return data["xvel"]
-    #except KeyError:
-    return data["Y-momentum"]/data["density"]
-add_field("y-velocity",function=_yVelocity, take_log=False,
-          units=r'\rm{cm}/\rm{s}')
-
-def _zVelocity(field,data):
-    """generate z-velocity from z-momentum and density
-
-    """
-    return data["Z-momentum"]/data["density"]
-add_field("z-velocity",function=_zVelocity, take_log=False,
-          units=r'\rm{cm}/\rm{s}')
-    


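The translation tables above alias yt's canonical field names ("Density", "x-velocity", ...) to the names stored in a GDF file ("density", "velocity_x", ...). Conceptually a translation field is nothing more than a derived field whose function hands back the on-disk field unchanged; a rough sketch of that idea (an illustration, not yt's actual TranslationFunc):

    def make_translation(on_disk_name):
        """Return a field function that simply reads the on-disk field."""
        def _translated(field, data):
            return data[on_disk_name]
        return _translated

    # Hypothetical usage, mirroring the add_field calls above:
    #   add_field("Density", function=make_translation("density"), take_log=True)
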
diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/frontends/gdf/io.py
--- a/yt/frontends/gdf/io.py
+++ b/yt/frontends/gdf/io.py
@@ -25,45 +25,48 @@
 """
 from yt.utilities.io_handler import \
            BaseIOHandler
+import h5py
 
-class IOHandlerChomboHDF5(BaseIOHandler):
-    _data_style = "chombo_hdf5"
+class IOHandlerGDFHDF5(BaseIOHandler):
+    _data_style = "grid_data_format"
     _offset_string = 'data:offsets=0'
     _data_string = 'data:datatype=0'
 
     def _field_dict(self,fhandle):
-        ncomp = int(fhandle['/'].attrs['num_components'])
-        temp =  fhandle['/'].attrs.listitems()[-ncomp:]
-        val, keys = zip(*temp)
-        val = [int(re.match('component_(\d+)',v).groups()[0]) for v in val]
+        keys = fhandle['field_types'].keys()
+        val = fhandle['field_types'].keys()
+        # ncomp = int(fhandle['/'].attrs['num_components'])
+        # temp =  fhandle['/'].attrs.listitems()[-ncomp:]
+        # val, keys = zip(*temp)
+        # val = [int(re.match('component_(\d+)',v).groups()[0]) for v in val]
         return dict(zip(keys,val))
         
     def _read_field_names(self,grid):
         fhandle = h5py.File(grid.filename,'r')
-        ncomp = int(fhandle['/'].attrs['num_components'])
-        field_names = [c[1] for c in f['/'].attrs.listitems()[-ncomp:]]
-        fhandle.close()
-        return field_names
+        return fhandle['field_types'].keys()
     
     def _read_data_set(self,grid,field):
         fhandle = h5py.File(grid.hierarchy.hierarchy_filename,'r')
+        return fhandle['/data/grid_%010i/'%grid.id+field][:]
+        # field_dict = self._field_dict(fhandle)
+        # lstring = 'level_%i' % grid.Level
+        # lev = fhandle[lstring]
+        # dims = grid.ActiveDimensions
+        # boxsize = dims.prod()
+        
+        # grid_offset = lev[self._offset_string][grid._level_id]
+        # start = grid_offset+field_dict[field]*boxsize
+        # stop = start + boxsize
+        # data = lev[self._data_string][start:stop]
 
-        field_dict = self._field_dict(fhandle)
-        lstring = 'level_%i' % grid.Level
-        lev = fhandle[lstring]
-        dims = grid.ActiveDimensions
-        boxsize = dims.prod()
-        
-        grid_offset = lev[self._offset_string][grid._level_id]
-        start = grid_offset+field_dict[field]*boxsize
-        stop = start + boxsize
-        data = lev[self._data_string][start:stop]
-        fhandle.close()
-        return data.reshape(dims, order='F')
+        # return data.reshape(dims, order='F')
                                           
 
     def _read_data_slice(self, grid, field, axis, coord):
         sl = [slice(None), slice(None), slice(None)]
         sl[axis] = slice(coord, coord + 1)
-        return self._read_data_set(grid,field)[sl]
+        fhandle = h5py.File(grid.hierarchy.hierarchy_filename,'r')
+        return fhandle['/data/grid_%010i/'%grid.id+field][:][sl]
 
+    # return self._read_data_set(grid,field)[sl]
+
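
The rewritten GDF handler reads each field straight out of an HDF5 group named
for the grid, instead of computing offsets into a flat per-level array.  A
minimal sketch of that access pattern with h5py, assuming a file whose field
names are the keys of /field_types and whose grids live under /data (the file
name, grid id, and field below are placeholders):

    import h5py

    def read_gdf_field(filename, grid_id, field):
        # Grids are stored as /data/grid_<10-digit id>/<field>.
        with h5py.File(filename, "r") as fhandle:
            assert field in fhandle["field_types"].keys()
            return fhandle["/data/grid_%010i/" % grid_id + field][:]

    # e.g. read_gdf_field("sedov.gdf", 3, "density")

Unlike the handler above, the sketch closes the file through the context
manager once the read is done.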


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/frontends/maestro/api.py
--- a/yt/frontends/maestro/api.py
+++ b/yt/frontends/maestro/api.py
@@ -36,7 +36,6 @@
       MaestroStaticOutput
 
 from .fields import \
-      MaestroFieldContainer, \
       MaestroFieldInfo, \
       add_maestro_field
 


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/frontends/maestro/data_structures.py
--- a/yt/frontends/maestro/data_structures.py
+++ b/yt/frontends/maestro/data_structures.py
@@ -54,9 +54,12 @@
     yt2maestroFieldsDict, \
     maestro_FAB_header_pattern
 
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc
 from .fields import \
-    MaestroFieldContainer, \
-    add_field
+    MaestroFieldInfo, \
+    add_maestro_field, \
+    KnownMaestroFields
 
 
 class MaestroGrid(AMRGridPatch):
@@ -118,7 +121,6 @@
 class MaestroHierarchy(AMRHierarchy):
     grid = MaestroGrid
     def __init__(self, pf, data_style='maestro'):
-        self.field_info = MaestroFieldContainer()
         self.field_indexes = {}
         self.parameter_file = weakref.proxy(pf)
         header_filename = os.path.join(pf.fullplotdir,'Header')
@@ -391,21 +393,6 @@
     def _detect_fields(self):
         pass
 
-    def _setup_unknown_fields(self):
-        for field in self.field_list:
-            if field in self.parameter_file.field_info: continue
-            mylog.info("Adding %s to list of fields", field)
-            cf = None
-            if self.parameter_file.has_key(field):
-                def external_wrapper(f):
-                    def _convert_function(data):
-                        return data.convert(f)
-                    return _convert_function
-                cf = external_wrapper(field)
-            add_field(field, lambda a, b: None,
-                      convert_function=cf, take_log=False)
-
-
     def _setup_derived_fields(self):
         pass
 
@@ -431,7 +418,8 @@
     *filename*, without looking at the Maestro hierarchy.
     """
     _hierarchy_class = MaestroHierarchy
-    _fieldinfo_class = MaestroFieldContainer
+    _fieldinfo_fallback = MaestroFieldInfo
+    _fieldinfo_known = KnownMaestroFields
 
     def __init__(self, plotname, paramFilename=None, 
                  data_style='maestro', paranoia=False,
@@ -455,7 +443,6 @@
         # this is the unit of time; NOT the current time
         self.parameters["Time"] = 1 # second
 
-        self.field_info = self._fieldinfo_class()
         self._parse_header_file()
 
 


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/frontends/maestro/fields.py
--- a/yt/frontends/maestro/fields.py
+++ b/yt/frontends/maestro/fields.py
@@ -27,7 +27,8 @@
 from yt.utilities.physical_constants import \
     mh, kboltz
 from yt.data_objects.field_info_container import \
-    CodeFieldInfoContainer, \
+    FieldInfoContainer, \
+    FieldInfo, \
     ValidateParameter, \
     ValidateDataField, \
     ValidateProperty, \
@@ -35,17 +36,11 @@
     ValidateGridType
 import yt.data_objects.universal_fields
 
-class MaestroFieldContainer(CodeFieldInfoContainer):
-    """
-    All Maestro-specific fields are stored in here.
-    """
-    _shared_state = {}
-    _field_list = {}
-MaestroFieldInfo = MaestroFieldContainer()
-add_maestro_field = MaestroFieldInfo.add_field
+KnownMaestroFields = FieldInfoContainer()
+add_maestro_field = KnownMaestroFields.add_field
 
-
-add_field = add_maestro_field
+MaestroFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = MaestroFieldInfo.add_field
 
 add_field("density", function=lambda a,b: None, take_log=True,
           validators = [ValidateDataField("density")],
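
The same two-container pattern recurs across the frontends touched in this
changeset: a plain FieldInfoContainer holds the fields the code writes to
disk, while a second container built with create_with_fallback(FieldInfo)
holds frontend-specific derived fields and resolves anything else against the
universal field list.  A rough sketch of the intended lookup behaviour,
assuming create_with_fallback returns a mapping whose misses fall through to
the fallback (the field lookups at the end are illustrative):

    from yt.data_objects.field_info_container import FieldInfoContainer, FieldInfo
    import yt.data_objects.universal_fields  # populates the universal FieldInfo

    KnownMaestroFields = FieldInfoContainer()            # fields expected on disk
    add_maestro_field = KnownMaestroFields.add_field

    MaestroFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
    add_field = MaestroFieldInfo.add_field

    # A frontend-level definition lives on MaestroFieldInfo itself.
    add_field("density", function=lambda a, b: None, take_log=True)

    rho_def = MaestroFieldInfo["density"]     # found directly
    vol_def = MaestroFieldInfo["CellVolume"]  # should resolve via the FieldInfo fallback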


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/frontends/nyx/api.py
--- a/yt/frontends/nyx/api.py
+++ b/yt/frontends/nyx/api.py
@@ -25,5 +25,5 @@
 """
 
 from .data_structures import NyxGrid, NyxHierarchy, NyxStaticOutput
-from .fields import NyxFieldContainer, nyx_fields, add_nyx_field
+from .fields import NyxFieldInfo, KnownNyxFields, add_nyx_field
 from .io import IOHandlerNative


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/frontends/nyx/data_structures.py
--- a/yt/frontends/nyx/data_structures.py
+++ b/yt/frontends/nyx/data_structures.py
@@ -41,13 +41,15 @@
 from yt.data_objects.grid_patch import AMRGridPatch
 from yt.data_objects.hierarchy import AMRHierarchy
 from yt.data_objects.static_output import StaticOutput
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc
 from yt.utilities.amr_utils import get_box_grids_level
 from yt.utilities.definitions import mpc_conversion
 
 from .definitions import parameter_type_dict, nyx_to_enzo_dict, \
                          fab_header_pattern, nyx_particle_field_names
 from .utils import boxlib_bool_to_int
-from .fields import NyxFieldContainer, add_field
+from .fields import NyxFieldInfo, add_nyx_field, KnownNyxFields
 
 
 class NyxGrid(AMRGridPatch):
@@ -118,7 +120,6 @@
     grid = NyxGrid
 
     def __init__(self, pf, data_style="nyx_native"):
-        self.field_info = NyxFieldContainer()
         self.field_indexes = {}
         self.parameter_file = weakref.proxy(pf)
         self.directory = pf.path
@@ -420,20 +421,6 @@
         return self.grids[mask]
 
     def _setup_field_list(self):
-        self.derived_field_list = []
-
-        for field in self.field_info:
-            try:
-                fd = self.field_info[field].get_dependencies(pf=self.parameter_file)
-            except:
-                continue
-            available = na.all([f in self.field_list for f in fd.requested])
-            if available: self.derived_field_list.append(field)
-
-        for field in self.field_list:
-            if field not in self.derived_field_list:
-                self.derived_field_list.append(field)
-
         if self.parameter_file.use_particles:
             # We know which particle fields will exist -- pending further
             # changes in the future.
@@ -446,7 +433,7 @@
                 # Note that we call add_field on the field_info directly.  This
                 # will allow the same field detection mechanism to work for 1D,
                 # 2D and 3D fields.
-                self.pf.field_info.add_field(field, lambda a, b: None,
+                self.pf.field_info.add_field(field, NullFunc,
                                              convert_function=cf,
                                              take_log=False, particle_type=True)
 
@@ -468,23 +455,19 @@
     def _detect_fields(self):
         pass
 
-    def _setup_unknown_fields(self):
-        # not sure what the case for this is.
+    def _setup_derived_fields(self):
+        self.derived_field_list = []
+        for field in self.parameter_file.field_info:
+            try:
+                fd = self.parameter_file.field_info[field].get_dependencies(
+                            pf = self.parameter_file)
+            except:
+                continue
+            available = na.all([f in self.field_list for f in fd.requested])
+            if available: self.derived_field_list.append(field)
         for field in self.field_list:
-            if field in self.parameter_file.field_info: continue
-            mylog.info("Adding %s to list of fields", field)
-            cf = None
-            if self.parameter_file.has_key(field):
-                def external_wrapper(f):
-                    def _convert_function(data):
-                        return data.convert(f)
-                    return _convert_function
-                cf = external_wrapper(field)
-            add_field(field, lambda a, b: None, convert_function=cf,
-                      take_log=False)
-
-    def _setup_derived_fields(self):
-        pass
+            if field not in self.derived_field_list:
+                self.derived_field_list.append(field)
 
     def _initialize_state_variables(self):
         """
@@ -509,7 +492,8 @@
 
     """
     _hierarchy_class = NyxHierarchy
-    _fieldinfo_class = NyxFieldContainer
+    _fieldinfo_fallback = NyxFieldInfo
+    _fieldinfo_known = KnownNyxFields
 
     @classmethod
     def _is_valid(cls, *args, **kwargs):
@@ -569,9 +553,6 @@
         # ``self.print_key_parameters()``
         StaticOutput.__init__(self, plotname.rstrip("/"), data_style=data_style)
 
-        # @todo: field pruning should happen here
-        self.field_info = self._fieldinfo_class()
-
         # @todo: check all of these and hopefully factor out of the constructor.
         # These should maybe not be hardcoded?
         self.parameters["HydroMethod"] = "nyx"  # always PPM DE


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/frontends/nyx/fields.py
--- a/yt/frontends/nyx/fields.py
+++ b/yt/frontends/nyx/fields.py
@@ -29,28 +29,26 @@
 
 import yt.data_objects.universal_fields
 
-from yt.data_objects.field_info_container import CodeFieldInfoContainer, \
+from yt.data_objects.field_info_container import FieldInfoContainer, \
+    NullFunc, TranslationFunc, FieldInfo, \
     ValidateParameter, ValidateDataField, ValidateProperty, ValidateSpatial, \
     ValidateGridType
 from yt.utilities.physical_constants import mh, kboltz
 
-class NyxFieldContainer(CodeFieldInfoContainer):
-    """ All nyx-specific fields are stored in here. """
-    _shared_state = {}
-    _field_list = {}
+NyxFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = NyxFieldInfo.add_field
 
-nyx_fields = NyxFieldContainer()
-add_field = nyx_fields.add_field
-add_nyx_field = add_field  # alias for API
+KnownNyxFields = FieldInfoContainer()
+add_nyx_field = KnownNyxFields.add_field 
 
 # Density
-add_field("density", function=lambda a, b: None, take_log=True,
+add_nyx_field("density", function=lambda a, b: None, take_log=True,
           validators=[ValidateDataField("density")],
           units=r"\rm{g}} / \rm{cm}^3",
           projected_units =r"\rm{g}} / \rm{cm}^2")
-nyx_fields["density"]._projected_units =r"\rm{g}} / \rm{cm}^2"
+KnownNyxFields["density"]._projected_units =r"\rm{g}} / \rm{cm}^2"
 
-add_field("Density", function=lambda a, b: b["density"], take_log=True,
+add_field("Density", function=TranslationFunc("density"), take_log=True,
           units=r"\rm{g}} / \rm{cm}^3",
           projected_units =r"\rm{g}} / \rm{cm}^2")
 
@@ -61,28 +59,30 @@
     return data["particle_mass"]
 add_field("ParticleMassMsun", function=_particle_mass_m_sun,
           validators=[ValidateSpatial(0), ValidateDataField("particle_mass")],
-          particle_type=True, convert_function=_convertParticleMassMsun, take_log=True, units=r"\rm{M_{\odot}}")
+          particle_type=True, convert_function=_convertParticleMassMsun,
+          take_log=True, units=r"\rm{M_{\odot}}")
           
-add_field("Dark_Matter_Density", function=lambda a, b: b["particle_mass_density"], take_log=True,
+add_nyx_field("Dark_Matter_Density", function=TranslationFunc("particle_mass_density"),
+          take_log=True,
           units=r"\rm{g}} / \rm{cm}^3",particle_type=True,
           projected_units =r"\rm{g}} / \rm{cm}^2")
 
 
 # Energy Density
 # @todo: ``energy_density``
-add_field("total_energy", function=lambda a, b: None, take_log=True,
+add_nyx_field("total_energy", function=lambda a, b: None, take_log=True,
           validators=[ValidateDataField("total_energy")],
           units=r"\rm{M_{\odot}} (\rm{km} / \rm{s})^2")
 
 # Momentum in each dimension.
 # @todo: ``momentum_x``
-add_field("x-momentum", function=lambda a, b: None, take_log=False,
+add_nyx_field("x-momentum", function=lambda a, b: None, take_log=False,
           validators=[ValidateDataField("x-momentum")],
           units=r"\rm{M_{\odot}} \rm{km} / \rm{s}")
-add_field("y-momentum", function=lambda a, b: None, take_log=False,
+add_nyx_field("y-momentum", function=lambda a, b: None, take_log=False,
           validators=[ValidateDataField("y-momentum")],
           units=r"\rm{M_{\odot}} \rm{km} / \rm{s}")
-add_field("z-momentum", function=lambda a, b: None, take_log=False,
+add_nyx_field("z-momentum", function=lambda a, b: None, take_log=False,
           validators=[ValidateDataField("z-momentum")],
           units=r"\rm{M_{\odot}} \rm{km} / \rm{s}")
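
TranslationFunc is used above to alias the on-disk "density" field to yt's
canonical "Density" without a lambda that reaches into the data object.  A
minimal sketch of what such a translation amounts to, assuming TranslationFunc
simply builds a field function that returns another field's data; the helper
name _translate is illustrative, and add_field here is the NyxFieldInfo alias
from the hunk above:

    from yt.frontends.nyx.fields import add_field  # NyxFieldInfo.add_field

    def _translate(source_field):
        def _func(field, data):
            return data[source_field]
        return _func

    add_field("Density", function=_translate("density"), take_log=True,
              units=r"\rm{g} / \rm{cm}^3",
              projected_units=r"\rm{g} / \rm{cm}^2")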
 


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/frontends/nyx/io.py
--- a/yt/frontends/nyx/io.py
+++ b/yt/frontends/nyx/io.py
@@ -28,7 +28,7 @@
 
 import os
 import numpy as na
-from yt.utilities.amr_utils import read_castro_particles
+from yt.utilities.amr_utils import read_castro_particles, read_and_seek
 from yt.utilities.io_handler import BaseIOHandler
 
 from definitions import fab_header_pattern, nyx_particle_field_names, \
@@ -57,80 +57,24 @@
         if field in nyx_particle_field_names:
             return self._read_particle_field(grid, field)
         filen = os.path.expanduser(grid.filename[field])
-        off = grid._offset[field]
-        inFile = open(filen, 'rb')
-        inFile.seek(off)
-        header = inFile.readline()
-        header.strip()
-
-        """
-        if grid._paranoid:
-            mylog.warn("Castro Native reader: Paranoid read mode.")
-            header_re = re.compile(fab_header_pattern)
-            bytesPerReal, endian, start, stop, centerType, nComponents = \
-                headerRe.search(header).groups()
-
-            # we will build up a dtype string, starting with endian.
-            # @todo: this code is ugly.
-            bytesPerReal = int(bytesPerReal)
-            if bytesPerReal == int(endian[0]):
-                dtype = '<'
-            elif bytesPerReal == int(endian[-1]):
-                dtype = '>'
-            else:
-                raise ValueError("FAB header is neither big nor little endian. Perhaps the file is corrupt?")
-
-            dtype += ('f%i' % bytesPerReal)  # always a floating point
-
-            # determine size of FAB
-            start = na.array(map(int, start.split(',')))
-            stop = na.array(map(int, stop.split(',')))
-
-            gridSize = stop - start + 1
-
-            error_count = 0
-            if (start != grid.start).any():
-                print "Paranoia Error: Cell_H and %s do not agree on grid start." % grid.filename
-                error_count += 1
-            if (stop != grid.stop).any():
-                print "Paranoia Error: Cell_H and %s do not agree on grid stop." % grid.filename
-                error_count += 1
-            if (gridSize != grid.ActiveDimensions).any():
-                print "Paranoia Error: Cell_H and %s do not agree on grid dimensions." % grid.filename
-                error_count += 1
-            if bytesPerReal != grid.hierarchy._bytesPerReal:
-                print "Paranoia Error: Cell_H and %s do not agree on bytes per real number." % grid.filename
-                error_count += 1
-            if (bytesPerReal == grid.hierarchy._bytesPerReal and dtype != grid.hierarchy._dtype):
-                print "Paranoia Error: Cell_H and %s do not agree on endianness." % grid.filename
-                error_count += 1
-
-            if error_count > 0:
-                raise RunTimeError("Paranoia unveiled %i differences between Cell_H and %s." % (error_count, grid.filename))
-        else:
-        """
-        start = grid.start_index
-        stop = grid.stop_index
-        dtype = grid.hierarchy._dtype
+        offset1 = grid._offset[field]
+        # one field has nElements * bytesPerReal bytes and is located
+        # nElements * bytesPerReal * field_index from the offset location
         bytesPerReal = grid.hierarchy._bytesPerReal
 
+        fieldname = yt_to_nyx_fields_dict.get(field, field)
+        field_index = grid.field_indexes[fieldname]
         nElements = grid.ActiveDimensions.prod()
+        offset2 = int(nElements*bytesPerReal*field_index)
 
-        # one field has nElements * bytesPerReal bytes and is located
-        # nElements * bytesPerReal * field_index from the offset location
-        if yt_to_nyx_fields_dict.has_key(field):
-            fieldname = yt_to_nyx_fields_dict[field]
-        else:
-            fieldname = field
-        field_index = grid.field_indexes[fieldname]
-        inFile.seek(int(nElements*bytesPerReal*field_index),1)
-        field = na.fromfile(inFile, count=nElements, dtype=dtype)
-        field = field.reshape(grid.ActiveDimensions[::-1]).swapaxes(0,2)
+        dtype = grid.hierarchy._dtype
+        field = na.empty(nElements, dtype=grid.hierarchy._dtype)
+        read_and_seek(filen, offset1, offset2, field, nElements * bytesPerReal)
+        field = field.reshape(grid.ActiveDimensions, order='F')
 
         # @todo: we can/should also check against the max and min in the header
         # file
 
-        inFile.close()
         return field
 
     def _read_data_slice(self, grid, field, axis, coord):
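
The reworked Nyx reader computes two offsets: offset1 locates the grid's FAB
record inside the data file, and offset2 skips the fields stored ahead of the
requested one within that record.  A pure-Python sketch of the same read (the
helper name read_boxlib_field is illustrative), assuming dims is an array like
grid.ActiveDimensions and the offsets come from the hierarchy metadata; the
read_and_seek helper added to the Cython utilities later in this changeset
does the equivalent without a Python-level file object:

    import numpy as na

    def read_boxlib_field(filename, header_offset, field_index, dims, dtype):
        n_elements = dims.prod()
        bytes_per_real = na.dtype(dtype).itemsize
        with open(filename, "rb") as f:
            f.seek(header_offset)
            f.readline()                                  # skip the FAB header line
            f.seek(int(n_elements * bytes_per_real * field_index), 1)
            data = na.fromfile(f, count=n_elements, dtype=dtype)
        return data.reshape(dims, order="F")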


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/frontends/orion/api.py
--- a/yt/frontends/orion/api.py
+++ b/yt/frontends/orion/api.py
@@ -34,7 +34,6 @@
       OrionStaticOutput
 
 from .fields import \
-      OrionFieldContainer, \
       OrionFieldInfo, \
       add_orion_field
 


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/frontends/orion/data_structures.py
--- a/yt/frontends/orion/data_structures.py
+++ b/yt/frontends/orion/data_structures.py
@@ -23,46 +23,41 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+import os
 import re
-import os
 import weakref
+
+from collections import defaultdict
+from string import strip, rstrip
+from stat import ST_CTIME
+
 import numpy as na
 
-from collections import \
-    defaultdict
-from string import \
-    strip, \
-    rstrip
-from stat import \
-    ST_CTIME
-
 from yt.funcs import *
-from yt.data_objects.grid_patch import \
-           AMRGridPatch
-from yt.data_objects.hierarchy import \
-           AMRHierarchy
-from yt.data_objects.static_output import \
-           StaticOutput
-from yt.utilities.definitions import \
-    mpc_conversion
+from yt.data_objects.field_info_container import FieldInfoContainer, NullFunc
+from yt.data_objects.grid_patch import AMRGridPatch
+from yt.data_objects.hierarchy import AMRHierarchy
+from yt.data_objects.static_output import StaticOutput
+from yt.utilities.definitions import mpc_conversion
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
-     parallel_root_only
+    parallel_root_only
 
 from .definitions import \
     orion2enzoDict, \
     parameterDict, \
     yt2orionFieldsDict, \
     orion_FAB_header_pattern
-
 from .fields import \
-    OrionFieldContainer, \
-    add_field
+    OrionFieldInfo, \
+    add_orion_field, \
+    KnownOrionFields
 
 
 class OrionGrid(AMRGridPatch):
     _id_offset = 0
-    def __init__(self, LeftEdge, RightEdge, index, level, filename, offset, dimensions,start,stop,paranoia=False,**kwargs):
-        AMRGridPatch.__init__(self, index,**kwargs)
+    def __init__(self, LeftEdge, RightEdge, index, level, filename, offset,
+                 dimensions, start, stop, paranoia=False, **kwargs):
+        AMRGridPatch.__init__(self, index, **kwargs)
         self.filename = filename
         self._offset = offset
         self._paranoid = paranoia
@@ -122,7 +117,6 @@
 class OrionHierarchy(AMRHierarchy):
     grid = OrionGrid
     def __init__(self, pf, data_style='orion_native'):
-        self.field_info = OrionFieldContainer()
         self.field_indexes = {}
         self.parameter_file = weakref.proxy(pf)
         header_filename = os.path.join(pf.fullplotdir,'Header')
@@ -399,21 +393,6 @@
     def _detect_fields(self):
         pass
 
-    def _setup_unknown_fields(self):
-        for field in self.field_list:
-            if field in self.parameter_file.field_info: continue
-            mylog.info("Adding %s to list of fields", field)
-            cf = None
-            if self.parameter_file.has_key(field):
-                def external_wrapper(f):
-                    def _convert_function(data):
-                        return data.convert(f)
-                    return _convert_function
-                cf = external_wrapper(field)
-            add_field(field, lambda a, b: None,
-                      convert_function=cf, take_log=False)
-
-
     def _setup_derived_fields(self):
         pass
 
@@ -439,7 +418,8 @@
     *filename*, without looking at the Orion hierarchy.
     """
     _hierarchy_class = OrionHierarchy
-    _fieldinfo_class = OrionFieldContainer
+    _fieldinfo_fallback = OrionFieldInfo
+    _fieldinfo_known = KnownOrionFields
 
     def __init__(self, plotname, paramFilename=None, fparamFilename=None,
                  data_style='orion_native', paranoia=False,
@@ -461,7 +441,6 @@
 
         StaticOutput.__init__(self, plotname.rstrip("/"),
                               data_style='orion_native')
-        self.field_info = self._fieldinfo_class()
 
         # These should maybe not be hardcoded?
         self.parameters["HydroMethod"] = 'orion' # always PPM DE


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/frontends/orion/fields.py
--- a/yt/frontends/orion/fields.py
+++ b/yt/frontends/orion/fields.py
@@ -25,7 +25,8 @@
 from yt.utilities.physical_constants import \
     mh, kboltz
 from yt.data_objects.field_info_container import \
-    CodeFieldInfoContainer, \
+    FieldInfoContainer, \
+    FieldInfo, \
     ValidateParameter, \
     ValidateDataField, \
     ValidateProperty, \
@@ -33,25 +34,17 @@
     ValidateGridType
 import yt.data_objects.universal_fields
 
-class OrionFieldContainer(CodeFieldInfoContainer):
-    """
-    All Orion-specific fields are stored in here.
-    """
-    _shared_state = {}
-    _field_list = {}
-OrionFieldInfo = OrionFieldContainer()
-add_orion_field = OrionFieldInfo.add_field
 
+KnownOrionFields = FieldInfoContainer()
+add_orion_field = KnownOrionFields.add_field
 
-add_field = add_orion_field
+OrionFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = OrionFieldInfo.add_field
 
-# def _convertDensity(data):
-#     return data.convert("Density")
 add_field("density", function=lambda a,b: None, take_log=True,
           validators = [ValidateDataField("density")],
           units=r"\rm{g}/\rm{cm}^3")
 OrionFieldInfo["density"]._projected_units =r"\rm{g}/\rm{cm}^2"
-#OrionFieldInfo["density"]._convert_function=_convertDensity
 
 add_field("eden", function=lambda a,b: None, take_log=True,
           validators = [ValidateDataField("eden")],


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/frontends/orion/io.py
--- a/yt/frontends/orion/io.py
+++ b/yt/frontends/orion/io.py
@@ -111,7 +111,7 @@
         field_index = grid.field_indexes[fieldname]
         inFile.seek(int(nElements*bytesPerReal*field_index),1)
         field = na.fromfile(inFile,count=nElements,dtype=dtype)
-        field = field.reshape(grid.ActiveDimensions[::-1]).swapaxes(0,2)
+        field = field.reshape(grid.ActiveDimensions, order='F')
 
         # we can/should also check against the max and min in the header file
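
Both the Nyx and Orion readers now replace the reversed-shape reshape plus
swapaxes with a single Fortran-ordered reshape; for a contiguous 1-D buffer
the two forms address the same elements.  A quick standalone check of that
equivalence (not yt code):

    import numpy as na

    dims = na.array([4, 3, 2])
    buf = na.arange(dims.prod(), dtype="float64")

    old = buf.reshape(dims[::-1]).swapaxes(0, 2)   # previous form
    new = buf.reshape(dims, order="F")             # form used above

    assert (old == new).all()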
 


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/frontends/ramses/api.py
--- a/yt/frontends/ramses/api.py
+++ b/yt/frontends/ramses/api.py
@@ -34,7 +34,6 @@
       RAMSESStaticOutput
 
 from .fields import \
-      RAMSESFieldContainer, \
       RAMSESFieldInfo, \
       add_ramses_field
 


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -39,13 +39,15 @@
     import _ramses_reader
 except ImportError:
     _ramses_reader = None
-from .fields import RAMSESFieldContainer
+from .fields import RAMSESFieldInfo, KnownRAMSESFields
 from yt.utilities.definitions import \
     mpc_conversion
 from yt.utilities.amr_utils import \
     get_box_grids_level
 from yt.utilities.io_handler import \
     io_registry
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc
 
 def num_deep_inc(f):
     def wrap(self, *args, **kwargs):
@@ -108,7 +110,6 @@
     
     def __init__(self,pf,data_style='ramses'):
         self.data_style = data_style
-        self.field_info = RAMSESFieldContainer()
         self.parameter_file = weakref.proxy(pf)
         # for now, the hierarchy file is the parameter file!
         self.hierarchy_filename = self.parameter_file.parameter_filename
@@ -265,20 +266,6 @@
             g._setup_dx()
         self.max_level = self.grid_levels.max()
 
-    def _setup_unknown_fields(self):
-        for field in self.field_list:
-            if field in self.parameter_file.field_info: continue
-            mylog.info("Adding %s to list of fields", field)
-            cf = None
-            if self.parameter_file.has_key(field):
-                def external_wrapper(f):
-                    def _convert_function(data):
-                        return data.convert(f)
-                    return _convert_function
-                cf = external_wrapper(field)
-            add_field(field, lambda a, b: None,
-                      convert_function=cf, take_log=False)
-
     def _setup_derived_fields(self):
         self.derived_field_list = []
 
@@ -287,7 +274,8 @@
 
 class RAMSESStaticOutput(StaticOutput):
     _hierarchy_class = RAMSESHierarchy
-    _fieldinfo_class = RAMSESFieldContainer
+    _fieldinfo_fallback = RAMSESFieldInfo
+    _fieldinfo_known = KnownRAMSESFields
     _handle = None
     
     def __init__(self, filename, data_style='ramses',
@@ -297,8 +285,6 @@
         StaticOutput.__init__(self, filename, data_style)
         self.storage_filename = storage_filename
 
-        self.field_info = self._fieldinfo_class()
-
     def __repr__(self):
         return self.basename.rsplit(".", 1)[0]
         


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/frontends/ramses/fields.py
--- a/yt/frontends/ramses/fields.py
+++ b/yt/frontends/ramses/fields.py
@@ -24,7 +24,8 @@
 """
 
 from yt.data_objects.field_info_container import \
-    CodeFieldInfoContainer, \
+    FieldInfoContainer, \
+    FieldInfo, \
     ValidateParameter, \
     ValidateDataField, \
     ValidateProperty, \
@@ -32,13 +33,12 @@
     ValidateGridType
 import yt.data_objects.universal_fields
 
-class RAMSESFieldContainer(CodeFieldInfoContainer):
-    _shared_state = {}
-    _field_list = {}
-RAMSESFieldInfo = RAMSESFieldContainer()
-add_ramses_field = RAMSESFieldInfo.add_field
 
-add_field = add_ramses_field
+KnownRAMSESFields = FieldInfoContainer()
+add_ramses_field = KnownRAMSESFields.add_field
+
+RAMSESFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = RAMSESFieldInfo.add_field
 
 known_ramses_fields = [
     "Density",


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/frontends/setup.py
--- a/yt/frontends/setup.py
+++ b/yt/frontends/setup.py
@@ -6,6 +6,7 @@
     config = Configuration('frontends',parent_package,top_path)
     config.make_config_py() # installs __config__.py
     #config.make_svn_version_py()
+    config.add_subpackage("gdf")
     config.add_subpackage("chombo")
     config.add_subpackage("enzo")
     config.add_subpackage("flash")


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -36,12 +36,15 @@
 from yt.data_objects.static_output import \
     StaticOutput
 from yt.utilities.logger import ytLogger as mylog
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc
 from yt.utilities.amr_utils import \
     get_box_grids_level
 
 from .fields import \
     StreamFieldContainer, \
-    add_stream_field
+    add_stream_field, \
+    KnownStreamFields
 
 class StreamGrid(AMRGridPatch):
     """
@@ -244,6 +247,7 @@
 class StreamStaticOutput(StaticOutput):
     _hierarchy_class = StreamHierarchy
     _fieldinfo_class = StreamFieldContainer
+    _fieldinfo_known = KnownStreamFields
     _data_style = 'stream'
 
     def __init__(self, stream_handler):
@@ -255,7 +259,6 @@
         self.stream_handler = stream_handler
         StaticOutput.__init__(self, "InMemoryParameterFile", self._data_style)
 
-        self.field_info = self._fieldinfo_class()
         self.units = {}
         self.time_units = {}
 


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/frontends/tiger/api.py
--- a/yt/frontends/tiger/api.py
+++ b/yt/frontends/tiger/api.py
@@ -34,7 +34,6 @@
       TigerStaticOutput
 
 from .fields import \
-      TigerFieldContainer, \
       TigerFieldInfo, \
       add_tiger_field
 


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/frontends/tiger/data_structures.py
--- a/yt/frontends/tiger/data_structures.py
+++ b/yt/frontends/tiger/data_structures.py
@@ -31,7 +31,9 @@
 from yt.data_objects.static_output import \
            StaticOutput
 
-from .fields import TigerFieldContainer
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc
+from .fields import TigerFieldInfo, KnownTigerFields
 
 class TigerGrid(AMRGridPatch):
     _id_offset = 0
@@ -126,16 +128,13 @@
     def field_list(self):
         return self.file_mapping.keys()
 
-    def _setup_unknown_fields(self):
-        for field in self.field_list:
-            add_tiger_field(field, lambda a, b: None)
-
     def _setup_derived_fields(self):
         self.derived_field_list = []
 
 class TigerStaticOutput(StaticOutput):
     _hierarchy_class = TigerHierarchy
-    _fieldinfo_class = TigerFieldContainer
+    _fieldinfo_fallback = TigerFieldInfo
+    _fieldinfo_known = KnownTigerFields
 
     def __init__(self, rhobname, root_size, max_grid_size=128,
                  data_style='tiger', storage_filename = None):
@@ -151,7 +150,8 @@
         if not iterable(max_grid_size): max_grid_size = (max_grid_size,) * 3
         self.max_grid_size = max_grid_size
 
-        self.field_info = self._fieldinfo_class()
+        self.field_info = FieldInfoContainer.create_with_fallback(
+                            self._fieldinfo_fallback)
 
         # We assume that we have basename + "rhob" and basename + "temp"
         # to get at our various parameters.


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/frontends/tiger/fields.py
--- a/yt/frontends/tiger/fields.py
+++ b/yt/frontends/tiger/fields.py
@@ -24,7 +24,8 @@
 """
 
 from yt.data_objects.field_info_container import \
-    CodeFieldInfoContainer, \
+    FieldInfoContainer, \
+    FieldInfo, \
     ValidateParameter, \
     ValidateDataField, \
     ValidateProperty, \
@@ -32,12 +33,9 @@
     ValidateGridType
 import yt.data_objects.universal_fields
 
-class TigerFieldContainer(CodeFieldInfoContainer):
-    """
-    This is a container for Tiger-specific fields.
-    """
-    _shared_state = {}
-    _field_list = {}
-TigerFieldInfo = TigerFieldContainer()
-add_tiger_field = TigerFieldInfo.add_field
+KnownTigerFields = FieldInfoContainer()
+add_tiger_field = KnownTigerFields.add_field
 
+TigerFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = TigerFieldInfo.add_field
+


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -195,6 +195,11 @@
         return func(*args, **kwargs)
     return check_parallel_rank
 
+def rootloginfo(*args):
+    from yt.config import ytcfg
+    if ytcfg.getint("yt", "__topcomm_parallel_rank") > 0: return
+    mylog.info(*args)
+
 def deprecate(func):
     """
     This decorator issues a deprecation warning.
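
rootloginfo is a small guard so that in a parallel run only the root task
emits a given log line; every other rank returns before touching mylog.  A
usage sketch (the message is illustrative):

    from yt.funcs import rootloginfo

    # Safe to call on every MPI task; only the rank whose
    # "__topcomm_parallel_rank" is 0 actually logs.
    rootloginfo("Beginning projection of %s", "Density")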


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/gui/reason/bottle_mods.py
--- a/yt/gui/reason/bottle_mods.py
+++ b/yt/gui/reason/bottle_mods.py
@@ -149,6 +149,7 @@
             print "WARNING: %s has no _route_prefix attribute.  Not notifying."
             continue
             w._route_prefix = token
+    repl._global_token = token
     repl.activate()
     repl.execution_thread.wait()
     print


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/gui/reason/extdirect_repl.py
--- a/yt/gui/reason/extdirect_repl.py
+++ b/yt/gui/reason/extdirect_repl.py
@@ -220,6 +220,7 @@
                               _resources = ("/resources/:path#.+#", "GET"),
                               _philogl = ("/philogl/:path#.+#", "GET"),
                               _js = ("/js/:path#.+#", "GET"),
+                              _leaflet = ("/leaflet/:path#.+#", "GET"),
                               _images = ("/images/:path#.+#", "GET"),
                               _theme = ("/theme/:path#.+#", "GET"),
                               _session_py = ("/session.py", "GET"),
@@ -340,6 +341,13 @@
             return
         return open(pp).read()
 
+    def _leaflet(self, path):
+        pp = os.path.join(local_dir, "html", "leaflet", path)
+        if not os.path.exists(pp):
+            response.status = 404
+            return
+        return open(pp).read()
+
     def _images(self, path):
         pp = os.path.join(local_dir, "html", "images", path)
         if not os.path.exists(pp):
@@ -515,6 +523,21 @@
                                          'widget_data_name': '_twidget_data'})
 
     @lockit
+    def create_mapview(self, widget_name):
+        # We want multiple maps simultaneously
+        uu = "/%s/%s" % (getattr(self, "_global_token", ""),
+                        str(uuid.uuid1()).replace("-","_"))
+        from .pannable_map import PannableMapServer
+        data = self.locals[widget_name].data_source
+        field_name = self.locals[widget_name]._current_field
+        pm = PannableMapServer(data, field_name, route_prefix = uu)
+        self.locals['_tpm'] = pm
+        self.locals['_twidget_data'] = {'prefix': uu, 'field':field_name}
+        self.execution_thread.queue.put({'type': 'add_widget',
+                                         'name': '_tpm',
+                                         'widget_data_name': '_twidget_data'})
+
+    @lockit
     def create_slice(self, pfname, center, axis, field, onmax):
         if not onmax: 
             center_string = \


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/gui/reason/html/index.html
--- a/yt/gui/reason/html/index.html
+++ b/yt/gui/reason/html/index.html
@@ -80,6 +80,10 @@
          In that case, it will default to whatever is in the family. --><link rel="stylesheet" type="text/css" href="http://fonts.googleapis.com/css?family=Inconsolata">
 
+    <!-- LEAFLET STUFF -->
+    <script type="text/javascript" src="leaflet/leaflet.js"></script>
+    <link rel="stylesheet" href="leaflet/leaflet.css" />
+
     <!-- LIBS --><script type="text/javascript" src="resources/adapter/ext/ext-base.js"></script><script type="text/javascript" src="resources/ext-all.js"></script>
@@ -119,6 +123,9 @@
     <!-- THE GRID VIEWER FUNCTIONS --><script type="text/javascript" src="js/widget_isocontour.js"></script>
 
+    <!-- THE PANNABLE MAP FUNCTIONS -->
+    <script type="text/javascript" src="js/widget_pannablemap.js"></script>
+
     <script id="gv-shader-fs" type="x-shader/x-fragment">
     #ifdef GL_ES
     precision highp float;


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/gui/reason/html/js/widget_pannablemap.js
--- /dev/null
+++ b/yt/gui/reason/html/js/widget_pannablemap.js
@@ -0,0 +1,76 @@
+/**********************************************************************
+The Pannable Map Widget
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+***********************************************************************/
+
+var WidgetPannableMap = function(python_varname, widget_data) {
+    this.id = python_varname;
+    this.widget_data = widget_data;
+
+    viewport.get("center-panel").add(
+        {
+            xtype: 'panel',
+            id: "pm_" + this.id,
+            title: "Pannable Map",
+            iconCls: 'graph',
+            autoScroll: true,
+            layout:'absolute',
+            closable: true,
+            items: [ 
+                {
+                    xtype:'box',
+                    autoEl: {
+                        tag: 'div',
+                        id: "map_" + this.id,
+                        width: 512,
+                        height: 512,
+                    },
+                    x: 10,
+                    y: 10,
+                    width: 512,
+                    height: 512,
+                    listeners: {afterrender:
+                        function() {
+                          var map = new L.Map('map_' + python_varname, {
+                                  center: new L.LatLng(0.0, 0.0),
+                                  zoom: 0,
+                                  });
+                          var cloudmadeUrl = widget_data['prefix'] + '/map/{z}/{x}/{y}.png';
+                          cloudmade = new L.TileLayer(cloudmadeUrl, {maxZoom: 18});
+                          map.addLayer(cloudmade);
+                    }},
+                }  
+            ]
+        }
+    );
+
+    viewport.get("center-panel").activate("pm_" + this.id);
+    viewport.doLayout();
+    this.panel = viewport.get("center-panel").get("pm_" + this.id);
+    this.panel.doLayout();
+    examine = this.panel;
+
+    this.accept_results = function(payload) { }
+}
+
+widget_types['pannable_map'] = WidgetPannableMap;


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/gui/reason/html/js/widget_plotwindow.js
--- a/yt/gui/reason/html/js/widget_plotwindow.js
+++ b/yt/gui/reason/html/js/widget_plotwindow.js
@@ -331,6 +331,21 @@
                         }); 
                     }
                 },{
+                    xtype: 'button',
+                    text: 'Pannable Map',
+                    x: 10,
+                    y: 335,
+                    width: 80,
+                    tooltip: "Open a pannable map in a new tab",
+                    handler: function(b,e) {
+                        img_data = image_dom.src;
+                        yt_rpc.ExtDirectREPL.create_mapview(
+                            {widget_name:python_varname},
+                        function(rv) {
+                            /*alert(rv);*/
+                        }); 
+                    }
+                },{
                     xtype: 'panel',
                     layout: 'vbox',
                     id: 'rhs_panel_' + python_varname,


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/gui/reason/pannable_map.py
--- a/yt/gui/reason/pannable_map.py
+++ b/yt/gui/reason/pannable_map.py
@@ -47,14 +47,16 @@
     return func
 
 class PannableMapServer(object):
-    def __init__(self, data, field):
+    _widget_name = "pannable_map"
+    def __init__(self, data, field, route_prefix = ""):
         self.data = data
         self.pf = data.pf
         self.field = field
-        bottle.route("/map/:L/:x/:y.png")(self.map)
-        bottle.route("/")(self.index)
-        bottle.route("/index.html")(self.index)
-        bottle.route("/static/:filename#.+#")(self.static)
+        
+        bottle.route("%s/map/:L/:x/:y.png" % route_prefix)(self.map)
+        bottle.route("%s/" % route_prefix)(self.index)
+        bottle.route("%s/index.html" % route_prefix)(self.index)
+        bottle.route("%s/static/:filename#.+#" % route_prefix)(self.static)
         # This is a double-check, since we do not always mandate this for
         # slices:
         self.data[self.field] = self.data[self.field].astype("float64")
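
The route_prefix argument lets several PannableMapServer instances coexist in
one Bottle application, which is what create_mapview relies on when it builds
a per-widget prefix from the REPL token and a UUID.  A minimal sketch of
registering two map servers side by side; the dataset path and the "mytoken"
prefix are placeholders:

    import uuid
    from yt.mods import load
    from yt.gui.reason.pannable_map import PannableMapServer

    pf = load("DD0030/DD0030")
    sources = [(pf.h.slice(0, 0.5), "Density"),
               (pf.h.proj(0, "Density"), "Density")]

    for source, field in sources:
        # Each map gets its own /<prefix>/map/... routes, so widgets never collide.
        prefix = "/%s/%s" % ("mytoken", str(uuid.uuid1()).replace("-", "_"))
        PannableMapServer(source, field, route_prefix=prefix)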


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -38,9 +38,15 @@
 from yt.funcs import *
 from yt.utilities.logger import ytLogger as mylog
 from yt.utilities.performance_counters import yt_counters, time_function
-from yt.config import ytcfg
+from yt.config import ytcfg, ytcfgDefaults
 import yt.utilities.physical_constants as physical_constants
 
+from yt.utilities.logger import level as __level
+if __level >= int(ytcfgDefaults["loglevel"]):
+    # This won't get displayed.
+    mylog.debug("Turning off NumPy error reporting")
+    na.seterr(all = 'ignore')
+
 from yt.data_objects.api import \
     BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \
     data_object_registry, \
@@ -60,7 +66,7 @@
     CastroStaticOutput, CastroFieldInfo, add_castro_field
 
 from yt.frontends.nyx.api import \
-    NyxStaticOutput, nyx_fields, add_nyx_field
+    NyxStaticOutput, NyxFieldInfo, add_nyx_field
 
 from yt.frontends.orion.api import \
     OrionStaticOutput, OrionFieldInfo, add_orion_field
@@ -77,6 +83,9 @@
 from yt.frontends.chombo.api import \
     ChomboStaticOutput, ChomboFieldInfo, add_chombo_field
 
+from yt.frontends.gdf.api import \
+    GDFStaticOutput, GDFFieldInfo, add_gdf_field
+
 from yt.frontends.art.api import \
     ARTStaticOutput, ARTFieldInfo, add_art_field
 


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/utilities/_amr_utils/PointsInVolume.pyx
--- a/yt/utilities/_amr_utils/PointsInVolume.pyx
+++ b/yt/utilities/_amr_utils/PointsInVolume.pyx
@@ -215,3 +215,38 @@
                 break
     return good
 
+def calculate_fill_grids(int fill_level, int refratio, int last_level,
+                         np.ndarray[np.int64_t, ndim=1] domain_width,
+                         np.ndarray[np.int64_t, ndim=1] cg_start_index,
+                         np.ndarray[np.int32_t, ndim=1] cg_dims,
+                         np.ndarray[np.int64_t, ndim=1] g_start_index,
+                         np.ndarray[np.int32_t, ndim=1] g_dims,
+                         np.ndarray[np.int32_t, ndim=3] g_child_mask):
+    cdef np.int64_t cgstart[3], gstart[3]
+    cdef np.int64_t cgend[3], gend[3]
+    cdef np.int64_t dw[3]
+    cdef np.int64_t cxi, cyi, czi, gxi, gyi, gzi, ci, cj, ck
+    cdef int i, total
+    for i in range(3):
+        dw[i] = domain_width[i]
+        cgstart[i] = cg_start_index[i]
+        gstart[i] = g_start_index[i]
+        cgend[i] = cgstart[i] + cg_dims[i]
+        gend[i] = gstart[i] + g_dims[i]
+    for cxi in range(cgstart[0], cgend[0]+1):
+        ci = (cxi % dw[0])
+        if ci < 0: ci += dw[0]
+        if ci < gstart[0]*refratio or ci >= gend[0]*refratio: continue
+        gxi = (<np.int64_t> (ci / refratio)) - gstart[0]
+        for cyi in range(cgstart[1], cgend[1]):
+            cj = (cyi % dw[1])
+            if cj < 0: cj += dw[1]
+            if cj < gstart[1]*refratio or cj >= gend[1]*refratio: continue
+            gyi = (<np.int64_t> (cj / refratio)) - gstart[1]
+            for czi in range(cgstart[2], cgend[2]):
+                ck = (czi % dw[2])
+                if ck < 0: ck += dw[2]
+                if ck < gstart[2]*refratio or cj >= gend[2]*refratio: continue
+                gzi = (<np.int64_t> (ck / refratio)) - gstart[2]
+                if last_level or g_child_mask[gxi, gyi, gzi] > 0: total += 1
+    return total
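
calculate_fill_grids walks the covering grid's cell indices, wraps each one
periodically into the domain, and counts the cells that land inside the target
grid (honouring the child mask unless this is the last level).  The core index
mapping, written out in plain Python as a sketch of the mapping only, not of
the full routine:

    def wrap_and_coarsen(ci, domain_width, refratio, g_start):
        # Wrap a covering-grid index into the periodic domain, then map it to
        # the index of the enclosing cell of the grid being sampled.
        ci = ci % domain_width
        if ci < 0:
            # Dead in Python, where % of a positive modulus is already
            # non-negative; kept to mirror the C-style remainder handling
            # in the Cython code above.
            ci += domain_width
        return (ci // refratio) - g_start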


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/utilities/_amr_utils/fortran_reader.pyx
--- a/yt/utilities/_amr_utils/fortran_reader.pyx
+++ b/yt/utilities/_amr_utils/fortran_reader.pyx
@@ -28,6 +28,7 @@
 cimport cython
 
 from stdio cimport fopen, fclose, FILE
+cimport libc.stdlib as stdlib
 
 #cdef inline int imax(int i0, int i1):
     #if i0 > i1: return i0
@@ -48,6 +49,21 @@
     int fseek(FILE *stream, long offset, int whence)
     size_t fread(void *ptr, size_t size, size_t nmemb, FILE *stream)
     long ftell(FILE *stream)
+    char *fgets(char *s, int size, FILE *stream)
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def read_and_seek(char *filename, int offset1, int offset2,
+                  np.ndarray buffer, int bytes):
+    cdef FILE *f = fopen(filename, "rb")
+    cdef void *buf = <void *> buffer.data
+    cdef char line[1024]
+    cdef size_t n = 1023
+    fseek(f, offset1, SEEK_SET)
+    fgets(line, n, f)
+    fseek(f, offset2, SEEK_CUR)
+    fread(buf, 1, bytes, f)
+    fclose(f)
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
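
read_and_seek fills a caller-allocated NumPy buffer using C stdio: seek to
offset1, consume one text line (the FAB header), seek offset2 further, then
fread the requested number of bytes.  A usage sketch matching the Nyx reader
earlier in this changeset; the filename, offsets, and sizes are placeholders:

    import numpy as na
    from yt.utilities.amr_utils import read_and_seek

    n_elements = 32 ** 3
    bytes_per_real = 8
    field_index = 2
    offset1 = 0                                   # start of the grid's FAB record
    offset2 = n_elements * bytes_per_real * field_index

    buf = na.empty(n_elements, dtype="float64")
    read_and_seek("Level_0/Cell_D_0000", offset1, offset2,
                  buf, n_elements * bytes_per_real)
    data = buf.reshape((32, 32, 32), order="F")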


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/utilities/_amr_utils/misc_utilities.pyx
--- a/yt/utilities/_amr_utils/misc_utilities.pyx
+++ b/yt/utilities/_amr_utils/misc_utilities.pyx
@@ -148,6 +148,49 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
+def obtain_rvec(data):
+    # This is just to let the pointers exist and whatnot.  We can't cdef them
+    # inside conditionals.
+    cdef np.ndarray[np.float64_t, ndim=1] xf
+    cdef np.ndarray[np.float64_t, ndim=1] yf
+    cdef np.ndarray[np.float64_t, ndim=1] zf
+    cdef np.ndarray[np.float64_t, ndim=2] rf
+    cdef np.ndarray[np.float64_t, ndim=3] xg
+    cdef np.ndarray[np.float64_t, ndim=3] yg
+    cdef np.ndarray[np.float64_t, ndim=3] zg
+    cdef np.ndarray[np.float64_t, ndim=4] rg
+    cdef np.float64_t c[3]
+    cdef int i, j, k
+    center = data.get_field_parameter("center")
+    c[0] = center[0]; c[1] = center[1]; c[2] = center[2]
+    if len(data['x'].shape) == 1:
+        # One dimensional data
+        xf = data['x']
+        yf = data['y']
+        zf = data['z']
+        rf = np.empty((3, xf.shape[0]), 'float64')
+        for i in range(xf.shape[0]):
+            rf[0, i] = xf[i] - c[0]
+            rf[1, i] = yf[i] - c[1]
+            rf[2, i] = zf[i] - c[2]
+        return rf
+    else:
+        # Three dimensional data
+        xg = data['x']
+        yg = data['y']
+        zg = data['z']
+        rg = np.empty((3, xg.shape[0], xg.shape[1], xg.shape[2]), 'float64')
+        for i in range(xg.shape[0]):
+            for j in range(xg.shape[1]):
+                for k in range(xg.shape[2]):
+                    rg[0,i,j,k] = xg[i,j,k] - c[0]
+                    rg[1,i,j,k] = yg[i,j,k] - c[1]
+                    rg[2,i,j,k] = zg[i,j,k] - c[2]
+        return rg
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
 def kdtree_get_choices(np.ndarray[np.float64_t, ndim=3] data,
                        np.ndarray[np.float64_t, ndim=1] l_corner,
                        np.ndarray[np.float64_t, ndim=1] r_corner):
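
obtain_rvec builds the displacement of every point (or cell) from the "center"
field parameter, with one branch for flat point data and one for 3-D grid
data.  For reference, the same result can be written with NumPy broadcasting;
the explicit Cython loops above avoid the intermediate temporaries.  A sketch,
assuming data['x'], data['y'], data['z'] are float64 arrays:

    import numpy as np

    def obtain_rvec_broadcast(data):
        c = np.asarray(data.get_field_parameter("center"), dtype="float64")
        coords = np.array([data["x"], data["y"], data["z"]], dtype="float64")
        # Subtract the center component-wise along the leading axis; works for
        # both the (3, N) and the (3, nx, ny, nz) cases.
        return coords - c.reshape((3,) + (1,) * (coords.ndim - 1))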


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -1058,6 +1058,19 @@
             print
             loki = raw_input("Press enter to go on, Ctrl-C to exit.")
             cedit.config.setoption(uu, hgrc_path, "bb.username=%s" % bbusername)
+        bb_fp = "81:2b:08:90:dc:d3:71:ee:e0:7c:b4:75:ce:9b:6c:48:94:56:a1:fe"
+        if uu.config("hostfingerprints", "bitbucket.org", None) is None:
+            print "Let's also add bitbucket.org to the known hosts, so hg"
+            print "doesn't warn us about bitbucket."
+            print "We will add this:"
+            print
+            print "   [hostfingerprints]"
+            print "   bitbucket.org = %s" % (bb_fp)
+            print
+            loki = raw_input("Press enter to go on, Ctrl-C to exit.")
+            cedit.config.setoption(uu, hgrc_path,
+                                   "hostfingerprints.bitbucket.org=%s" % bb_fp)
+
         # We now reload the UI's config file so that it catches the [bb]
         # section changes.
         uu.readconfig(hgrc_path[0])


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/utilities/data_point_utilities.c
--- a/yt/utilities/data_point_utilities.c
+++ b/yt/utilities/data_point_utilities.c
@@ -937,6 +937,15 @@
 /* These functions are both called with
     func(cubedata, griddata) */
 
+static void dcNothing(PyArrayObject* c_data, npy_int64 xc, npy_int64 yc, npy_int64 zc,
+                     PyArrayObject* g_data, npy_int64 xg, npy_int64 yg, npy_int64 zg)
+{
+    return;
+}
+
+/* These functions are both called with
+    func(cubedata, griddata) */
+
 static void dcRefine(PyArrayObject* c_data, npy_int64 xc, npy_int64 yc, npy_int64 zc,
                      PyArrayObject* g_data, npy_int64 xg, npy_int64 yg, npy_int64 zg)
 {
@@ -1107,36 +1116,37 @@
     cdx = (*(npy_int32 *) PyArray_GETPTR1(oc_dims, 0));
     cdy = (*(npy_int32 *) PyArray_GETPTR1(oc_dims, 1));
     cdz = (*(npy_int32 *) PyArray_GETPTR1(oc_dims, 2));
-    cxe = (cxs + cdx - 1);
-    cye = (cys + cdy - 1);
-    cze = (czs + cdz - 1);
+    cxe = (cxs + cdx);
+    cye = (cys + cdy);
+    cze = (czs + cdz);
 
     /* It turns out that C89 doesn't define a mechanism for choosing the sign
        of the remainder.
     */
         //fprintf(stderr, "ci == %d, cxi == %d, dw[0] == %d\n", (int) ci, (int) cxi, (int) dw[0]);
-    for(cxi=cxs;cxi<=cxe;cxi++) {
+    for(cxi=cxs;cxi<cxe;cxi++) {
         ci = (cxi % dw[0]);
         ci = (ci < 0) ? ci + dw[0] : ci;
         if ( ci < gxs*refratio || ci >= gxe*refratio) continue;
         gxi = floor(ci / refratio) - gxs;
-        for(cyi=cys;cyi<=cye;cyi++) {
+        for(cyi=cys;cyi<cye;cyi++) {
             cj = cyi % dw[1];
             cj = (cj < 0) ? cj + dw[1] : cj;
             if ( cj < gys*refratio || cj >= gye*refratio) continue;
             gyi = floor(cj / refratio) - gys;
-            for(czi=czs;czi<=cze;czi++) {
+            for(czi=czs;czi<cze;czi++) {
                 ck = czi % dw[2];
                 ck = (ck < 0) ? ck + dw[2] : ck;
                 if ( ck < gzs*refratio || ck >= gze*refratio) continue;
                 gzi = floor(ck / refratio) - gzs;
                     if ((ll) || (*(npy_int32*)PyArray_GETPTR3(mask, gxi,gyi,gzi) > 0)) 
                 {
-                for(n=0;n<n_fields;n++){
-                    to_call(c_data[n],
-                        cxi - cxs, cyi - cys, czi - czs,
-                        g_data[n], gxi, gyi, gzi);
-                }
+                if (direction!=2)
+                  for(n=0;n<n_fields;n++){
+                      to_call(c_data[n],
+                          cxi - cxs, cyi - cys, czi - czs,
+                          g_data[n], gxi, gyi, gzi);
+                  }
                 total += 1;
                 }
             }


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -790,6 +790,8 @@
         deps = []
         fi = self.pf.field_info
         for field in fields:
+            if any(getattr(v,"ghost_zones", 0) > 0 for v in
+                   fi[field].validators): continue
             deps += ensure_list(fi[field].get_dependencies(pf=self.pf).requested)
         return list(set(deps))
 




diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/visualization/plot_collection.py
--- a/yt/visualization/plot_collection.py
+++ b/yt/visualization/plot_collection.py
@@ -32,7 +32,8 @@
 from yt.data_objects.profiles import \
     BinnedProfile1D, \
     BinnedProfile2D
-from yt.utilities.definitions import axis_names, inv_axis_names
+from yt.utilities.definitions import \
+    axis_names, inv_axis_names, x_dict, y_dict
 from .plot_types import \
     FixedResolutionPlot, \
     SlicePlot, \
@@ -1790,3 +1791,75 @@
             ax.clear()
             cbars.append(ax)
     return fig, tr, cbars
+
+def _MPLFixImage(data_source, image_obj, field, cbar, cls):
+    nx, ny = image_obj.get_size()
+    def f(axes):
+        x0, x1 = axes.get_xlim()
+        y0, y1 = axes.get_ylim()
+        frb = cls(data_source, (x0, x1, y0, y1), (nx, ny))
+        image_obj.set_data(frb[field])
+        mi, ma = frb[field].min(), frb[field].max()
+        cbar.norm.autoscale((mi, ma))
+        image_obj.set_extent([x0, x1, y0, y1])
+        cbar.update_bruteforce(image_obj)
+    return f
+
+def matplotlib_widget(data_source, field, npix):
+    r"""Create a widget from a data_source that uses the Matplotlib interaction
+    method to pan, zoom, and so on.
+
+    This is a simple way to take a yt data source, for instance a projection or
+    a slice, and to create a matplotlib view into it that you can pan and zoom.
+    It uses the matplotlib interaction engine to manage input and display.
+
+    Parameters
+    ----------
+    data_source : :class:`yt.data_objects.data_containers.AMRProjBase` or :class:`yt.data_objects.data_containers.AMRSliceBase`
+        This is the source to be pixelized, which can be a projection or a
+        slice.  
+    field : string
+        The field that you want to display in the window.
+    npix : int
+        The number of pixels on a side you want the image to be.
+
+    Examples
+    --------
+
+    >>> pf = load("DD0030/DD0030")
+    >>> p = pf.h.proj(0, "Density")
+    >>> matplotlib_widget(p, "Density", 1024)
+
+    """
+    import pylab
+    import matplotlib.colors
+    from .fixed_resolution import FixedResolutionBuffer, \
+            ObliqueFixedResolutionBuffer
+    pf = data_source.pf
+    if getattr(data_source, "axis", 4) < 3:
+        cls = FixedResolutionBuffer
+        ax = data_source.axis
+        extent = [pf.domain_left_edge[x_dict[ax]],
+                  pf.domain_right_edge[x_dict[ax]],
+                  pf.domain_left_edge[y_dict[ax]],
+                  pf.domain_right_edge[y_dict[ax]]]
+    else:
+        cls = ObliqueFixedResolutionBuffer
+        extent = [0.0, 1.0, 0.0, 1.0]
+    take_log = pf.field_info[field].take_log
+    if take_log:
+        norm = matplotlib.colors.LogNorm()
+    else:
+        norm = matplotlib.colors.Normalize()
+    ax = pylab.figure().gca()
+    ax.autoscale(False)
+    axi = ax.imshow(na.random.random((npix, npix)),
+                    extent = extent, norm = norm,
+                    origin = 'lower')
+    cb = pylab.colorbar(axi, norm = norm)
+    showme = _MPLFixImage(data_source, axi, field, cb, cls)
+    ax.callbacks.connect("xlim_changed", showme)
+    ax.callbacks.connect("ylim_changed", showme)
+    ax.set_xlim(extent[0], extent[1])
+    ax.set_ylim(extent[2], extent[3])
+    return ax


diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -414,7 +414,7 @@
             self.zoom(f)
             yield self.snapshot()
 
-    def move_to(self, final, n_steps, final_width=None, exponential=True):
+    def move_to(self, final, n_steps, final_width=None, exponential=False):
         r"""Loop over a look_at
 
         This will yield `n_steps` snapshots until the current view has been
@@ -448,6 +448,8 @@
                     # front/back, left/right, top/bottom
                 final_zoom = final_width/na.array(self.width)
                 dW = final_zoom**(1.0/n_steps)
+	    else:
+		dW = 1.0
             position_diff = (na.array(final)/self.center)*1.0
             dx = position_diff**(1.0/n_steps)
         else:
@@ -456,6 +458,8 @@
                     width = na.array([final_width, final_width, final_width]) 
                     # front/back, left/right, top/bottom
                 dW = (1.0*final_width-na.array(self.width))/n_steps
+            else:
+                dW = 1.0
             dx = (na.array(final)-self.center)*1.0/n_steps
         for i in xrange(n_steps):
             if exponential:
@@ -533,6 +537,62 @@
 
 data_object_registry["camera"] = Camera
 
+class InteractiveCamera(Camera):
+    def __init__(self, center, normal_vector, width,
+                 resolution, transfer_function,
+                 north_vector = None, steady_north=False,
+                 volume = None, fields = None,
+                 log_fields = None,
+                 sub_samples = 5, pf = None,
+                 use_kd=True, l_max=None, no_ghost=True,
+                 tree_type='domain',expand_factor=1.0,
+                 le=None, re=None):
+        self.frames = []
+        Camera.__init__(self, center, normal_vector, width,
+                 resolution, transfer_function,
+                 north_vector = north_vector, steady_north=steady_north,
+                 volume = volume, fields = fields,
+                 log_fields = log_fields,
+                 sub_samples = sub_samples, pf = pf,
+                 use_kd=use_kd, l_max=l_max, no_ghost=no_ghost,
+                 tree_type=tree_type,expand_factor=expand_factor,
+                 le=le, re=re)
+
+    def snapshot(self, fn = None, clip_ratio = None):
+        import matplotlib
+        matplotlib.pylab.figure(2)
+        self.transfer_function.show()
+        matplotlib.pylab.draw()
+        im = Camera.snapshot(self, fn, clip_ratio)
+        matplotlib.pylab.figure(1)
+        matplotlib.pylab.imshow(im/im.max())
+        matplotlib.pylab.draw()
+        self.frames.append(im)
+        
+    def rotation(self, theta, n_steps, rot_vector=None):
+        for frame in Camera.rotation(self, theta, n_steps, rot_vector):
+            if frame is not None:
+                self.frames.append(frame)
+                
+    def zoomin(self, final, n_steps):
+        for frame in Camera.zoomin(self, final, n_steps):
+            if frame is not None:
+                self.frames.append(frame)
+                
+    def clear_frames(self):
+        del self.frames
+        self.frames = []
+        
+    def save_frames(self, basename, clip_ratio=None):
+        for i, frame in enumerate(self.frames):
+            fn = basename + '_%04i.png'%i
+            if clip_ratio is not None:
+                write_bitmap(frame, fn, clip_ratio*frame.std())
+            else:
+                write_bitmap(frame, fn)
+
+data_object_registry["interactive_camera"] = InteractiveCamera
+
 class PerspectiveCamera(Camera):
     def get_vector_plane(self, image):
         # We should move away from pre-generation of vectors like this and into
@@ -796,5 +856,5 @@
         image *= dl
     else:
         image /= vals[:,:,1]
-        pf.field_info._field_list.pop("temp_weightfield")
+        pf.field_info.pop("temp_weightfield")
     return image


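A note on the move_to change above: with exponential=False now the default,
the camera width and position advance by a fixed increment each step, while
the exponential path multiplies them by a fixed ratio; the new else branches
simply make sure dW is defined when no final_width is given.  A small sketch
of the two width schedules (width_schedule is a hypothetical helper, not part
of yt):

    import numpy as np

    def width_schedule(w0, w1, n_steps, exponential=False):
        # Hypothetical helper illustrating the two interpolation modes used by
        # Camera.move_to: multiply by a constant ratio per step (exponential)
        # or add a constant increment per step (linear).
        widths = [w0]
        if exponential:
            dW = (w1 / w0) ** (1.0 / n_steps)   # per-step zoom ratio
            for _ in range(n_steps):
                widths.append(widths[-1] * dW)
        else:
            dW = (w1 - w0) / n_steps            # per-step increment
            for _ in range(n_steps):
                widths.append(widths[-1] + dW)
        return np.array(widths)

    print(width_schedule(1.0, 0.1, 5, exponential=True))   # geometric: 1.0 ... 0.1
    print(width_schedule(1.0, 0.1, 5, exponential=False))  # arithmetic: 1.0 ... 0.1
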
diff -r 73a023c40f7b80f8b51fd54a72eab993744549cf -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b yt/visualization/volume_rendering/transfer_functions.py
--- a/yt/visualization/volume_rendering/transfer_functions.py
+++ b/yt/visualization/volume_rendering/transfer_functions.py
@@ -199,6 +199,28 @@
         pylab.ylim(0.0, 1.0)
         pylab.savefig(filename)
 
+    def show(self):
+        r"""Display an image of the transfer function
+
+        This function loads up matplotlib and displays the current transfer function.
+
+        Parameters
+        ----------
+
+        Examples
+        --------
+
+        >>> tf = TransferFunction( (-10.0, -5.0) )
+        >>> tf.add_gaussian(-9.0, 0.01, 1.0)
+        >>> tf.show()
+        """
+        import matplotlib; import pylab
+        pylab.clf()
+        pylab.plot(self.x, self.y, 'xk-')
+        pylab.xlim(*self.x_bounds)
+        pylab.ylim(0.0, 1.0)
+        pylab.draw()
+
 class MultiVariateTransferFunction(object):
     def __init__(self):
         r"""This object constructs a set of field tables that allow for
@@ -447,6 +469,46 @@
         ax.set_xlabel("Value")
         pyplot.savefig(filename)
 
+    def show(self):
+        r"""Display an image of the transfer function
+
+        This function loads up matplotlib and displays the current transfer function.
+
+        Parameters
+        ----------
+
+        Examples
+        --------
+
+        >>> tf = TransferFunction( (-10.0, -5.0) )
+        >>> tf.add_gaussian(-9.0, 0.01, 1.0)
+        >>> tf.show()
+        """
+        from matplotlib import pyplot
+        from matplotlib.ticker import FuncFormatter
+        pyplot.clf()
+        ax = pyplot.axes()
+        i_data = na.zeros((self.alpha.x.size, self.funcs[0].y.size, 3))
+        i_data[:,:,0] = na.outer(na.ones(self.alpha.x.size), self.funcs[0].y)
+        i_data[:,:,1] = na.outer(na.ones(self.alpha.x.size), self.funcs[1].y)
+        i_data[:,:,2] = na.outer(na.ones(self.alpha.x.size), self.funcs[2].y)
+        ax.imshow(i_data, origin='lower')
+        ax.fill_between(na.arange(self.alpha.y.size), self.alpha.x.size * self.alpha.y, y2=self.alpha.x.size, color='white')
+        ax.set_xlim(0, self.alpha.x.size)
+        xticks = na.arange(na.ceil(self.alpha.x[0]), na.floor(self.alpha.x[-1]) + 1, 1) - self.alpha.x[0]
+        xticks *= self.alpha.x.size / (self.alpha.x[-1] - self.alpha.x[0])
+        ax.xaxis.set_ticks(xticks)
+        def x_format(x, pos):
+            return "%.1f" % (x * (self.alpha.x[-1] - self.alpha.x[0]) / (self.alpha.x.size) + self.alpha.x[0])
+        ax.xaxis.set_major_formatter(FuncFormatter(x_format))
+        yticks = na.linspace(0,1,5) * self.alpha.y.size
+        ax.yaxis.set_ticks(yticks)
+        def y_format(y, pos):
+            return (y / self.alpha.y.size)
+        ax.yaxis.set_major_formatter(FuncFormatter(y_format))
+        ax.set_ylabel("Transmission")
+        ax.set_xlabel("Value")
+        
     def sample_colormap(self, v, w, alpha=None, colormap="gist_stern", col_bounds=None):
         r"""Add a Gaussian based on an existing colormap.
 


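Since the new ColorTransferFunction show() above draws the channel tables with
imshow, its tick positions and labels have to convert between data values and
column indices; that is what the xticks arithmetic and the FuncFormatter
closures do.  A quick sketch of that mapping (value_to_pixel and
pixel_to_value are hypothetical names):

    def value_to_pixel(v, x_bounds, nbins):
        # Data value -> column index, as used for the x tick positions in show().
        x0, x1 = x_bounds
        return (v - x0) * nbins / float(x1 - x0)

    def pixel_to_value(p, x_bounds, nbins):
        # Column index -> data value, as used by the x-axis FuncFormatter.
        x0, x1 = x_bounds
        return p * (x1 - x0) / float(nbins) + x0

    x_bounds = (-10.0, -5.0)   # same bounds as the docstring example
    print(value_to_pixel(-7.5, x_bounds, 256))   # 128.0, the middle column
    print(pixel_to_value(128, x_bounds, 256))    # -7.5
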

https://bitbucket.org/yt_analysis/yt/changeset/b063c4158587/
changeset:   b063c4158587
branch:      geometry_handling
user:        MatthewTurk
date:        2011-11-12 17:24:57
summary:     Adding geometry_utils to amr_utils.pyx and moving a few routines into it.
affected #:  3 files

diff -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b -r b063c415858751476b83c37e56b9b36d49d2245b yt/utilities/_amr_utils/geometry_utils.pyx
--- a/yt/utilities/_amr_utils/geometry_utils.pyx
+++ b/yt/utilities/_amr_utils/geometry_utils.pyx
@@ -28,6 +28,17 @@
 cimport cython
 from stdlib cimport malloc, free, abs
 
+# These routines are separated into a couple different categories:
+#
+#   * Routines for identifying intersections of an object with a bounding box
+#   * Routines for identifying cells/points inside a bounding box that
+#     intersect with an object
+#   * Routines that speed up some type of geometric calculation
+
+# First, bounding box / object intersection routines.
+# These all respect the interface "dobj" and a set of left_edges, right_edges,
+# sometimes also accepting level and mask information.
+
 def ortho_ray_grids(dobj, np.ndarray[np.float64_t, ndim=2] left_edges,
                           np.ndarray[np.float64_t, ndim=2] right_edges):
     cdef int i
@@ -62,7 +73,7 @@
             i1 = (ax+1) % 3
             i2 = (ax+2) % 3
             t = (left_edges[gi,ax] - p0[ax])/v[ax]
-            for i in range(3)
+            for i in range(3):
                 vs[i] = t * v[i] + p0[i]
             if left_edges[gi,i1] <= vs[i1] and \
                right_edges[gi,i1] >= vs[i1] and \
@@ -71,7 +82,7 @@
                 gridi[gi] = 1
                 break
             t = (right_edges[gi,ax] - p0[ax])/v[ax]
-            for i in range(3)
+            for i in range(3):
                 vs[i] = t * v[i] + p0[i]
             if left_edges[gi,i1] <= vs[i1] and \
                right_edges[gi,i1] >= vs[i1] and \
@@ -79,22 +90,155 @@
                right_edges[gi,i2] >= vs[i2]:
                 gridi[gi] = 1
                 break
-        if gridi[gi] = 1: continue
+        if gridi[gi] == 1: continue
         # if the point is fully enclosed, we count the grid
         if left_edges[gi,0] <= p0[0] and \
-           right_edges[gi,0] >= p0[0]:
+           right_edges[gi,0] >= p0[0] and \
            left_edges[gi,1] <= p0[1] and \
-           right_edges[gi,1] >= p0[1]:
+           right_edges[gi,1] >= p0[1] and \
            left_edges[gi,2] <= p0[2] and \
            right_edges[gi,2] >= p0[2]:
             gridi[gi] = 1
             continue
         if left_edges[gi,0] <= p1[0] and \
-           right_edges[gi,0] >= p1[0]:
+           right_edges[gi,0] >= p1[0] and \
            left_edges[gi,1] <= p1[1] and \
-           right_edges[gi,1] >= p1[1]:
+           right_edges[gi,1] >= p1[1] and \
            left_edges[gi,2] <= p1[2] and \
            right_edges[gi,2] >= p1[2]:
             gridi[gi] = 1
             continue
     return gridi
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def get_box_grids_level(np.ndarray[np.float64_t, ndim=1] left_edge,
+                        np.ndarray[np.float64_t, ndim=1] right_edge,
+                        int level,
+                        np.ndarray[np.float64_t, ndim=2] left_edges,
+                        np.ndarray[np.float64_t, ndim=2] right_edges,
+                        np.ndarray[np.int32_t, ndim=2] levels,
+                        np.ndarray[np.int32_t, ndim=1] mask,
+                        int min_index = 0):
+    cdef int i, n
+    cdef int nx = left_edges.shape[0]
+    cdef int inside 
+    for i in range(nx):
+        if i < min_index or levels[i,0] != level:
+            mask[i] = 0
+            continue
+        inside = 1
+        for n in range(3):
+            if left_edge[n] >= right_edges[i,n] or \
+               right_edge[n] <= left_edges[i,n]:
+                inside = 0
+                break
+        if inside == 1: mask[i] = 1
+        else: mask[i] = 0
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def get_box_grids_below_level(
+                        np.ndarray[np.float64_t, ndim=1] left_edge,
+                        np.ndarray[np.float64_t, ndim=1] right_edge,
+                        int level,
+                        np.ndarray[np.float64_t, ndim=2] left_edges,
+                        np.ndarray[np.float64_t, ndim=2] right_edges,
+                        np.ndarray[np.int32_t, ndim=2] levels,
+                        np.ndarray[np.int32_t, ndim=1] mask):
+    cdef int i, n
+    cdef int nx = left_edges.shape[0]
+    cdef int inside 
+    for i in range(nx):
+        mask[i] = 0
+        if levels[i,0] <= level:
+            inside = 1
+            for n in range(3):
+                if left_edge[n] >= right_edges[i,n] or \
+                   right_edge[n] <= left_edges[i,n]:
+                    inside = 0
+                    break
+            if inside == 1: mask[i] = 1
+
+# Finally, miscellaneous routines.
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def find_values_at_point(np.ndarray[np.float64_t, ndim=1] point,
+                         np.ndarray[np.float64_t, ndim=2] left_edges,
+                         np.ndarray[np.float64_t, ndim=2] right_edges,
+                         np.ndarray[np.int32_t, ndim=2] dimensions,
+                         field_names, grid_objects):
+    # This iterates in order, first to last, and then returns with the first
+    # one in which the point is located; this means if you order from highest
+    # level to lowest, you will find the correct grid without consulting child
+    # masking.  Note also that we will do a few relatively slow operations on
+    # strings and whatnot, but they should not be terribly slow.
+    cdef int ind[3], gi, fi
+    cdef int nf = len(field_names)
+    cdef np.float64_t dds
+    cdef np.ndarray[np.float64_t, ndim=3] field
+    cdef np.ndarray[np.float64_t, ndim=1] rv = np.zeros(nf, dtype='float64')
+    for gi in range(left_edges.shape[0]):
+        if not ((left_edges[gi,0] < point[0] < right_edges[gi,0])
+            and (left_edges[gi,1] < point[1] < right_edges[gi,1])
+            and (left_edges[gi,2] < point[2] < right_edges[gi,2])):
+            continue
+        # We found our grid!
+        for fi in range(3):
+            dds = ((right_edges[gi,fi] - left_edges[gi,fi])/
+                   (<np.float64_t> dimensions[gi,fi]))
+            ind[fi] = <int> ((point[fi] - left_edges[gi,fi])/dds)
+        grid = grid_objects[gi]
+        for fi in range(nf):
+            field = grid[field_names[fi]]
+            rv[fi] = field[ind[0], ind[1], ind[2]]
+        return rv
+    raise KeyError
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def obtain_rvec(data):
+    # This is just to let the pointers exist and whatnot.  We can't cdef them
+    # inside conditionals.
+    cdef np.ndarray[np.float64_t, ndim=1] xf
+    cdef np.ndarray[np.float64_t, ndim=1] yf
+    cdef np.ndarray[np.float64_t, ndim=1] zf
+    cdef np.ndarray[np.float64_t, ndim=2] rf
+    cdef np.ndarray[np.float64_t, ndim=3] xg
+    cdef np.ndarray[np.float64_t, ndim=3] yg
+    cdef np.ndarray[np.float64_t, ndim=3] zg
+    cdef np.ndarray[np.float64_t, ndim=4] rg
+    cdef np.float64_t c[3]
+    cdef int i, j, k
+    center = data.get_field_parameter("center")
+    c[0] = center[0]; c[1] = center[1]; c[2] = center[2]
+    if len(data['x'].shape) == 1:
+        # One dimensional data
+        xf = data['x']
+        yf = data['y']
+        zf = data['z']
+        rf = np.empty((3, xf.shape[0]), 'float64')
+        for i in range(xf.shape[0]):
+            rf[0, i] = xf[i] - c[0]
+            rf[1, i] = yf[i] - c[1]
+            rf[2, i] = zf[i] - c[2]
+        return rf
+    else:
+        # Three dimensional data
+        xg = data['x']
+        yg = data['y']
+        zg = data['z']
+        rg = np.empty((3, xg.shape[0], xg.shape[1], xg.shape[2]), 'float64')
+        for i in range(xg.shape[0]):
+            for j in range(xg.shape[1]):
+                for k in range(xg.shape[2]):
+                    rg[0,i,j,k] = xg[i,j,k] - c[0]
+                    rg[1,i,j,k] = yg[i,j,k] - c[1]
+                    rg[2,i,j,k] = zg[i,j,k] - c[2]
+        return rg
+

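The get_box_grids_level / get_box_grids_below_level routines above mark a grid
as overlapping the box unless the grid lies entirely to one side of the box
along some axis.  A NumPy sketch of that per-axis separation test
(overlapping_grids is a hypothetical helper and ignores the level and
min_index handling):

    import numpy as np

    def overlapping_grids(left_edge, right_edge, left_edges, right_edges):
        # A grid overlaps the box iff, along every axis, the box's left edge
        # is below the grid's right edge and the box's right edge is above
        # the grid's left edge.
        return np.all((left_edge < right_edges) & (right_edge > left_edges),
                      axis=1)

    left_edges = np.array([[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]])
    right_edges = np.array([[0.5, 0.5, 0.5], [1.0, 1.0, 1.0]])
    print(overlapping_grids(np.array([0.4, 0.4, 0.4]),
                            np.array([0.6, 0.6, 0.6]),
                            left_edges, right_edges))   # [ True  True]
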

diff -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b -r b063c415858751476b83c37e56b9b36d49d2245b yt/utilities/_amr_utils/misc_utilities.pyx
--- a/yt/utilities/_amr_utils/misc_utilities.pyx
+++ b/yt/utilities/_amr_utils/misc_utilities.pyx
@@ -61,6 +61,65 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
+def kdtree_get_choices(np.ndarray[np.float64_t, ndim=3] data,
+                       np.ndarray[np.float64_t, ndim=1] l_corner,
+                       np.ndarray[np.float64_t, ndim=1] r_corner):
+    cdef int i, j, k, dim, n_unique, best_dim, n_best, n_grids, addit, my_split
+    n_grids = data.shape[0]
+    cdef np.float64_t **uniquedims, *uniques, split
+    uniquedims = <np.float64_t **> alloca(3 * sizeof(np.float64_t*))
+    for i in range(3):
+        uniquedims[i] = <np.float64_t *> \
+                alloca(2*n_grids * sizeof(np.float64_t))
+    my_max = 0
+    for dim in range(3):
+        n_unique = 0
+        uniques = uniquedims[dim]
+        for i in range(n_grids):
+            # Check for disqualification
+            for j in range(2):
+                #print "Checking against", i,j,dim,data[i,j,dim]
+                if not (l_corner[dim] < data[i, j, dim] and
+                        data[i, j, dim] < r_corner[dim]):
+                    #print "Skipping ", data[i,j,dim]
+                    continue
+                skipit = 0
+                # Add our left ...
+                for k in range(n_unique):
+                    if uniques[k] == data[i, j, dim]:
+                        skipit = 1
+                        #print "Identified", uniques[k], data[i,j,dim], n_unique
+                        break
+                if skipit == 0:
+                    uniques[n_unique] = data[i, j, dim]
+                    n_unique += 1
+        if n_unique > my_max:
+            best_dim = dim
+            my_max = n_unique
+            my_split = (n_unique-1)/2
+    # I recognize how lame this is.
+    cdef np.ndarray[np.float64_t, ndim=1] tarr = np.empty(my_max, dtype='float64')
+    for i in range(my_max):
+        #print "Setting tarr: ", i, uniquedims[best_dim][i]
+        tarr[i] = uniquedims[best_dim][i]
+    tarr.sort()
+    split = tarr[my_split]
+    cdef np.ndarray[np.uint8_t, ndim=1] less_ids = np.empty(n_grids, dtype='uint8')
+    cdef np.ndarray[np.uint8_t, ndim=1] greater_ids = np.empty(n_grids, dtype='uint8')
+    for i in range(n_grids):
+        if data[i, 0, best_dim] < split:
+            less_ids[i] = 1
+        else:
+            less_ids[i] = 0
+        if data[i, 1, best_dim] > split:
+            greater_ids[i] = 1
+        else:
+            greater_ids[i] = 0
+    # Return out unique values
+    return best_dim, split, less_ids.view("bool"), greater_ids.view("bool")
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
 def get_box_grids_level(np.ndarray[np.float64_t, ndim=1] left_edge,
                         np.ndarray[np.float64_t, ndim=1] right_edge,
                         int level,
@@ -188,62 +247,3 @@
                     rg[2,i,j,k] = zg[i,j,k] - c[2]
         return rg
 
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-def kdtree_get_choices(np.ndarray[np.float64_t, ndim=3] data,
-                       np.ndarray[np.float64_t, ndim=1] l_corner,
-                       np.ndarray[np.float64_t, ndim=1] r_corner):
-    cdef int i, j, k, dim, n_unique, best_dim, n_best, n_grids, addit, my_split
-    n_grids = data.shape[0]
-    cdef np.float64_t **uniquedims, *uniques, split
-    uniquedims = <np.float64_t **> alloca(3 * sizeof(np.float64_t*))
-    for i in range(3):
-        uniquedims[i] = <np.float64_t *> \
-                alloca(2*n_grids * sizeof(np.float64_t))
-    my_max = 0
-    for dim in range(3):
-        n_unique = 0
-        uniques = uniquedims[dim]
-        for i in range(n_grids):
-            # Check for disqualification
-            for j in range(2):
-                #print "Checking against", i,j,dim,data[i,j,dim]
-                if not (l_corner[dim] < data[i, j, dim] and
-                        data[i, j, dim] < r_corner[dim]):
-                    #print "Skipping ", data[i,j,dim]
-                    continue
-                skipit = 0
-                # Add our left ...
-                for k in range(n_unique):
-                    if uniques[k] == data[i, j, dim]:
-                        skipit = 1
-                        #print "Identified", uniques[k], data[i,j,dim], n_unique
-                        break
-                if skipit == 0:
-                    uniques[n_unique] = data[i, j, dim]
-                    n_unique += 1
-        if n_unique > my_max:
-            best_dim = dim
-            my_max = n_unique
-            my_split = (n_unique-1)/2
-    # I recognize how lame this is.
-    cdef np.ndarray[np.float64_t, ndim=1] tarr = np.empty(my_max, dtype='float64')
-    for i in range(my_max):
-        #print "Setting tarr: ", i, uniquedims[best_dim][i]
-        tarr[i] = uniquedims[best_dim][i]
-    tarr.sort()
-    split = tarr[my_split]
-    cdef np.ndarray[np.uint8_t, ndim=1] less_ids = np.empty(n_grids, dtype='uint8')
-    cdef np.ndarray[np.uint8_t, ndim=1] greater_ids = np.empty(n_grids, dtype='uint8')
-    for i in range(n_grids):
-        if data[i, 0, best_dim] < split:
-            less_ids[i] = 1
-        else:
-            less_ids[i] = 0
-        if data[i, 1, best_dim] > split:
-            greater_ids[i] = 1
-        else:
-            greater_ids[i] = 0
-    # Return out unique values
-    return best_dim, split, less_ids.view("bool"), greater_ids.view("bool")


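kdtree_get_choices, moved within misc_utilities.pyx above, picks the split
dimension with the most distinct grid-edge coordinates strictly inside the
current node and splits at the median of those coordinates, then classifies
grids by which side of the split they extend to.  A pure-NumPy sketch of that
selection (choose_split is a hypothetical name):

    import numpy as np

    def choose_split(data, l_corner, r_corner):
        # data has shape (n_grids, 2, 3): each grid's left/right edge per axis.
        best_dim, best_uniques = 0, np.empty(0)
        for dim in range(3):
            edges = data[:, :, dim].ravel()
            inside = (edges > l_corner[dim]) & (edges < r_corner[dim])
            uniques = np.unique(edges[inside])
            if uniques.size > best_uniques.size:
                best_dim, best_uniques = dim, uniques
        split = best_uniques[(best_uniques.size - 1) // 2]   # median candidate edge
        less = data[:, 0, best_dim] < split      # grids extending below the split
        greater = data[:, 1, best_dim] > split   # grids extending above the split
        return best_dim, split, less, greater

    # Two unit-cube halves split along x at 0.5 inside the node (0,0,0)-(1,1,1):
    data = np.array([[[0.0, 0.0, 0.0], [0.5, 1.0, 1.0]],
                     [[0.5, 0.0, 0.0], [1.0, 1.0, 1.0]]])
    print(choose_split(data, np.zeros(3), np.ones(3)))
    # (0, 0.5, array([ True, False]), array([False,  True]))
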
diff -r 244b8f82e4cc12d25fe1611f5da93e5b26fc144b -r b063c415858751476b83c37e56b9b36d49d2245b yt/utilities/amr_utils.pyx
--- a/yt/utilities/amr_utils.pyx
+++ b/yt/utilities/amr_utils.pyx
@@ -48,3 +48,4 @@
 include "_amr_utils/Octree.pyx"
 include "_amr_utils/freetype_writer.pyx"
 include "_amr_utils/misc_utilities.pyx"
+include "_amr_utils/geometry_utils.pyx"



https://bitbucket.org/yt_analysis/yt/changeset/dc4adb1cc614/
changeset:   dc4adb1cc614
branch:      geometry_handling
user:        MatthewTurk
date:        2011-11-12 18:14:00
summary:     Fixing an import error that showed up from the merge from yt.
affected #:  1 file

diff -r b063c415858751476b83c37e56b9b36d49d2245b -r dc4adb1cc6145cf6ca368cfcfc44ea4def3687a8 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -41,7 +41,7 @@
 from yt.utilities.amr_utils import find_grids_in_inclined_box, \
     grid_points_in_volume, planar_points_in_volume, VoxelTraversal, \
     QuadTree, get_box_grids_below_level, ghost_zone_interpolate, \
-    march_cubes_grid, march_cubes_grid_flux
+    march_cubes_grid, march_cubes_grid_flux, ortho_ray_grids, ray_grids
 from yt.utilities.data_point_utilities import CombineGrids, \
     DataCubeRefine, DataCubeReplace, FillRegion, FillBuffer
 from yt.utilities.definitions import axis_names, x_dict, y_dict
@@ -556,7 +556,7 @@
         return (self.px, self.py)
 
     def _get_list_of_grids(self):
-        gi = au.ortho_ray_grids(self, 
+        gi = ortho_ray_grids(self, 
                 self.hierarchy.grid_left_edges,
                 self.hierarchy.grid_right_edges)
         self._grids = self.hierarchy.grids[gi]
@@ -629,7 +629,7 @@
         #self._refresh_data()
 
     def _get_list_of_grids(self):
-        gi = au.ray_grids(self,
+        gi = ray_grids(self,
                 self.hierarchy.grid_left_edges,
                 self.hierarchy.grid_right_edges)
         self._grids = self.hierarchy.grids[gi]



https://bitbucket.org/yt_analysis/yt/changeset/619ccb84cbff/
changeset:   619ccb84cbff
branch:      geometry_handling
user:        MatthewTurk
date:        2011-11-12 18:16:00
summary:     Fixing another typo.
affected #:  1 file

diff -r dc4adb1cc6145cf6ca368cfcfc44ea4def3687a8 -r 619ccb84cbff25e5e84ef0f273e13b14c663e63a yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -557,8 +557,8 @@
 
     def _get_list_of_grids(self):
         gi = ortho_ray_grids(self, 
-                self.hierarchy.grid_left_edges,
-                self.hierarchy.grid_right_edges)
+                self.hierarchy.grid_left_edge,
+                self.hierarchy.grid_right_edge)
         self._grids = self.hierarchy.grids[gi]
 
     def _get_data_from_grid(self, grid, field):
@@ -630,8 +630,8 @@
 
     def _get_list_of_grids(self):
         gi = ray_grids(self,
-                self.hierarchy.grid_left_edges,
-                self.hierarchy.grid_right_edges)
+                self.hierarchy.grid_left_edge,
+                self.hierarchy.grid_right_edge)
         self._grids = self.hierarchy.grids[gi]
 
     def _get_data_from_grid(self, grid, field):



https://bitbucket.org/yt_analysis/yt/changeset/7255b565094c/
changeset:   7255b565094c
branch:      geometry_handling
user:        MatthewTurk
date:        2011-11-12 18:17:46
summary:     Refactoring the _amr_utils directory to build individual .so files, which should
speed up compilation when any one item changes.  We will probably add back the
option of compiling into a monolithic library at some point (which works better
for static linking), but for now this is fine.
affected #:  10 files

diff -r 619ccb84cbff25e5e84ef0f273e13b14c663e63a -r 7255b565094cff2311b63492264fc1f5a8f71d0d yt/utilities/_amr_utils/CICDeposit.pyx
--- a/yt/utilities/_amr_utils/CICDeposit.pyx
+++ b/yt/utilities/_amr_utils/CICDeposit.pyx
@@ -23,6 +23,10 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+cimport numpy as np
+cimport cython
+from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip
+
 @cython.boundscheck(False)
 @cython.wraparound(False)
 def CICDeposit_3(np.ndarray[np.float64_t, ndim=1] posx,


diff -r 619ccb84cbff25e5e84ef0f273e13b14c663e63a -r 7255b565094cff2311b63492264fc1f5a8f71d0d yt/utilities/_amr_utils/Interpolators.pyx
--- a/yt/utilities/_amr_utils/Interpolators.pyx
+++ b/yt/utilities/_amr_utils/Interpolators.pyx
@@ -26,6 +26,7 @@
 import numpy as np
 cimport numpy as np
 cimport cython
+from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip
 
 @cython.cdivision(True)
 @cython.wraparound(False)


diff -r 619ccb84cbff25e5e84ef0f273e13b14c663e63a -r 7255b565094cff2311b63492264fc1f5a8f71d0d yt/utilities/_amr_utils/Octree.pyx
--- a/yt/utilities/_amr_utils/Octree.pyx
+++ b/yt/utilities/_amr_utils/Octree.pyx
@@ -30,6 +30,7 @@
 cimport numpy as cnp
 cimport cython
 
+from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip
 from stdlib cimport malloc, free, abs
 
 import sys, time


diff -r 619ccb84cbff25e5e84ef0f273e13b14c663e63a -r 7255b565094cff2311b63492264fc1f5a8f71d0d yt/utilities/_amr_utils/VolumeIntegrator.pyx
--- a/yt/utilities/_amr_utils/VolumeIntegrator.pyx
+++ b/yt/utilities/_amr_utils/VolumeIntegrator.pyx
@@ -29,31 +29,7 @@
 cimport kdtree_utils
 cimport healpix_interface
 from stdlib cimport malloc, free, abs
-
-cdef inline int imax(int i0, int i1):
-    if i0 > i1: return i0
-    return i1
-
-cdef inline np.float64_t fmax(np.float64_t f0, np.float64_t f1):
-    if f0 > f1: return f0
-    return f1
-
-cdef inline int imin(int i0, int i1):
-    if i0 < i1: return i0
-    return i1
-
-cdef inline np.float64_t fmin(np.float64_t f0, np.float64_t f1):
-    if f0 < f1: return f0
-    return f1
-
-cdef inline int iclip(int i, int a, int b):
-    if i < a: return a
-    if i > b: return b
-    return i
-
-cdef inline np.float64_t fclip(np.float64_t f,
-                      np.float64_t a, np.float64_t b):
-    return fmin(fmax(f, a), b)
+from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip
 
 cdef extern from "math.h":
     double exp(double x)




diff -r 619ccb84cbff25e5e84ef0f273e13b14c663e63a -r 7255b565094cff2311b63492264fc1f5a8f71d0d yt/utilities/_amr_utils/fp_utils.pxd
--- /dev/null
+++ b/yt/utilities/_amr_utils/fp_utils.pxd
@@ -0,0 +1,53 @@
+"""
+Shareable definitions for common fp/int Cython utilities
+
+Author: Matthew Turk <matthewturk@gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+cimport numpy as np
+cimport cython
+
+cdef inline int imax(int i0, int i1):
+    if i0 > i1: return i0
+    return i1
+
+cdef inline np.float64_t fmax(np.float64_t f0, np.float64_t f1):
+    if f0 > f1: return f0
+    return f1
+
+cdef inline int imin(int i0, int i1):
+    if i0 < i1: return i0
+    return i1
+
+cdef inline np.float64_t fmin(np.float64_t f0, np.float64_t f1):
+    if f0 < f1: return f0
+    return f1
+
+cdef inline int iclip(int i, int a, int b):
+    if i < a: return a
+    if i > b: return b
+    return i
+
+cdef inline np.float64_t fclip(np.float64_t f,
+                      np.float64_t a, np.float64_t b):
+    return fmin(fmax(f, a), b)
+


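fp_utils.pxd above centralizes the small inline min/max/clamp helpers that
VolumeIntegrator.pyx previously declared privately, so every extension can
cimport the same definitions.  In plain Python terms the clamp helper reduces
to the following (fclip here is a stand-in; the real one is a cdef inline
operating on np.float64_t):

    def fclip(f, a, b):
        # Clamp f to the closed interval [a, b].
        return min(max(f, a), b)

    assert fclip(1.7, 0.0, 1.0) == 1.0
    assert fclip(-0.2, 0.0, 1.0) == 0.0
    assert fclip(0.3, 0.0, 1.0) == 0.3
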
diff -r 619ccb84cbff25e5e84ef0f273e13b14c663e63a -r 7255b565094cff2311b63492264fc1f5a8f71d0d yt/utilities/_amr_utils/png_writer.pyx
--- a/yt/utilities/_amr_utils/png_writer.pyx
+++ b/yt/utilities/_amr_utils/png_writer.pyx
@@ -26,9 +26,10 @@
 import numpy as np
 cimport numpy as np
 cimport cython
-from libc.stdlib cimport malloc, realloc
+from libc.stdlib cimport malloc, realloc, free
 from libc.string cimport memcpy
 from cpython.string cimport PyString_FromStringAndSize
+from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip
 
 from stdio cimport fopen, fclose, FILE
 


diff -r 619ccb84cbff25e5e84ef0f273e13b14c663e63a -r 7255b565094cff2311b63492264fc1f5a8f71d0d yt/utilities/_amr_utils/setup.py
--- /dev/null
+++ b/yt/utilities/_amr_utils/setup.py
@@ -0,0 +1,177 @@
+#!/usr/bin/env python
+import setuptools
+import os, sys, os.path, glob
+
+def check_for_png():
+    # First up: PNG_DIR in environment
+    if "PNG_DIR" in os.environ:
+        png_dir = os.environ["PNG_DIR"]
+        png_inc = os.path.join(png_dir, "include")
+        png_lib = os.path.join(png_dir, "lib")
+        print "PNG_LOCATION: PNG_DIR: %s, %s" % (png_inc, png_lib)
+        return (png_inc, png_lib)
+    # Next up, we try png.cfg
+    elif os.path.exists("png.cfg"):
+        png_dir = open("png.cfg").read().strip()
+        png_inc = os.path.join(png_dir, "include")
+        png_lib = os.path.join(png_dir, "lib")
+        print "PNG_LOCATION: png.cfg: %s, %s" % (png_inc, png_lib)
+        return (png_inc, png_lib)
+    # Now we see if ctypes can help us:
+    try:
+        import ctypes.util
+        png_libfile = ctypes.util.find_library("png")
+        if png_libfile is not None and os.path.isfile(png_libfile):
+            # Now we've gotten a library, but we'll need to figure out the
+            # includes if this is going to work.  It feels like there is a
+            # better way to pull off two directory names.
+            png_dir = os.path.dirname(os.path.dirname(png_libfile))
+            if os.path.isdir(os.path.join(png_dir, "include")) and \
+               os.path.isfile(os.path.join(png_dir, "include", "png.h")):
+                png_inc = os.path.join(png_dir, "include")
+                png_lib = os.path.join(png_dir, "lib")
+                print "PNG_LOCATION: png found in: %s, %s" % (png_inc, png_lib)
+                return png_inc, png_lib
+    except ImportError:
+        pass
+    # X11 is where it's located by default on OSX, although I am slightly
+    # reluctant to link against that one.
+    for png_dir in ["/usr/", "/usr/local/", "/usr/X11/"]:
+        if os.path.isfile(os.path.join(png_dir, "include", "png.h")):
+            if os.path.isdir(os.path.join(png_dir, "include")) and \
+               os.path.isfile(os.path.join(png_dir, "include", "png.h")):
+                png_inc = os.path.join(png_dir, "include")
+                png_lib = os.path.join(png_dir, "lib")
+                print "PNG_LOCATION: png found in: %s, %s" % (png_inc, png_lib)
+                return png_inc, png_lib
+    print "Reading png location from png.cfg failed."
+    print "Please place the base directory of your png install in png.cfg and restart."
+    print "(ex: \"echo '/usr/local/' > png.cfg\" )"
+    sys.exit(1)
+
+def check_for_freetype():
+    # First up: environment
+    if "FTYPE_DIR" in os.environ:
+        freetype_dir = os.environ["FTYPE_DIR"]
+        freetype_inc = os.path.join(freetype_dir, "include")
+        freetype_lib = os.path.join(freetype_dir, "lib")
+        print "FTYPE_LOCATION: FTYPE_DIR: %s, %s" % (freetype_inc, freetype_lib)
+        return (freetype_inc, freetype_lib)
+    # Next up, we try freetype.cfg
+    elif os.path.exists("freetype.cfg"):
+        freetype_dir = open("freetype.cfg").read().strip()
+        freetype_inc = os.path.join(freetype_dir, "include")
+        freetype_lib = os.path.join(freetype_dir, "lib")
+        print "FTYPE_LOCATION: freetype.cfg: %s, %s" % (freetype_inc, freetype_lib)
+        return (freetype_inc, freetype_lib)
+    # Now we see if ctypes can help us:
+    try:
+        import ctypes.util
+        freetype_libfile = ctypes.util.find_library("freetype")
+        if freetype_libfile is not None and os.path.isfile(freetype_libfile):
+            # Now we've gotten a library, but we'll need to figure out the
+            # includes if this is going to work.  It feels like there is a
+            # better way to pull off two directory names.
+            freetype_dir = os.path.dirname(os.path.dirname(freetype_libfile))
+            if os.path.isdir(os.path.join(freetype_dir, "include")) and \
+               os.path.isfile(os.path.join(freetype_dir, "include", "ft2build.h")):
+                freetype_inc = os.path.join(freetype_dir, "include")
+                freetype_lib = os.path.join(freetype_dir, "lib")
+                print "FTYPE_LOCATION: freetype found in: %s, %s" % (freetype_inc, freetype_lib)
+                return freetype_inc, freetype_lib
+    except ImportError:
+        pass
+    # X11 is where it's located by default on OSX, although I am slightly
+    # reluctant to link against that one.
+    for freetype_dir in ["/usr/", "/usr/local/", "/usr/X11/"]:
+        if os.path.isfile(os.path.join(freetype_dir, "include", "ft2build.h")):
+            if os.path.isdir(os.path.join(freetype_dir, "include")) and \
+               os.path.isfile(os.path.join(freetype_dir, "include", "ft2build.h")):
+                freetype_inc = os.path.join(freetype_dir, "include")
+                freetype_lib = os.path.join(freetype_dir, "lib")
+                print "FTYPE_LOCATION: freetype found in: %s, %s" % (freetype_inc, freetype_lib)
+                return freetype_inc, freetype_lib
+    print "Reading freetype location from freetype.cfg failed."
+    print "Please place the base directory of your freetype install in freetype.cfg and restart."
+    print "(ex: \"echo '/usr/local/' > freetype.cfg\" )"
+    print "You can locate this by looking for the file ft2build.h"
+    sys.exit(1)
+
+def configuration(parent_package='',top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('lib',parent_package,top_path)
+    png_inc, png_lib = check_for_png()
+    freetype_inc, freetype_lib = check_for_freetype()
+    # Because setjmp.h is included by lots of things, and because libpng hasn't
+    # always properly checked its header files (see
+    # https://bugzilla.redhat.com/show_bug.cgi?id=494579 ) we simply disable
+    # support for setjmp.
+    config.add_extension("CICDeposit", 
+                ["yt/utilities/_amr_utils/CICDeposit.pyx"],
+                libraries=["m"], depends=["yt/utilities/_amr_utils/fp_utils.pxd"])
+    config.add_extension("ContourFinding", 
+                ["yt/utilities/_amr_utils/ContourFinding.pyx",
+                 "yt/utilities/_amr_utils/union_find.c"],
+                libraries=["m"], depends=["yt/utilities/_amr_utils/fp_utils.pxd"])
+    config.add_extension("DepthFirstOctree", 
+                ["yt/utilities/_amr_utils/DepthFirstOctree.pyx"],
+                libraries=["m"], depends=["yt/utilities/_amr_utils/fp_utils.pxd"])
+    config.add_extension("fortran_reader", 
+                ["yt/utilities/_amr_utils/fortran_reader.pyx"],
+                libraries=["m"], depends=["yt/utilities/_amr_utils/fp_utils.pxd"])
+    config.add_extension("freetype_writer", 
+                ["yt/utilities/_amr_utils/freetype_writer.pyx"],
+                include_dirs = [os.path.join(freetype_inc, "freetype2")],
+                library_dirs = [freetype_lib], libraries=["freetype"],
+                depends=["yt/utilities/_amr_utils/freetype_includes.h"])
+    config.add_extension("geometry_utils", 
+                ["yt/utilities/_amr_utils/geometry_utils.pyx"],
+                libraries=["m"], depends=["yt/utilities/_amr_utils/fp_utils.pxd"])
+    config.add_extension("Interpolators", 
+                ["yt/utilities/_amr_utils/Interpolators.pyx"],
+                libraries=["m"], depends=["yt/utilities/_amr_utils/fp_utils.pxd"])
+    config.add_extension("misc_utilities", 
+                ["yt/utilities/_amr_utils/misc_utilities.pyx"],
+                libraries=["m"], depends=["yt/utilities/_amr_utils/fp_utils.pxd"])
+    config.add_extension("Octree", 
+                ["yt/utilities/_amr_utils/Octree.pyx"],
+                libraries=["m"], depends=["yt/utilities/_amr_utils/fp_utils.pxd"])
+    config.add_extension("png_writer", 
+                ["yt/utilities/_amr_utils/png_writer.pyx"],
+                define_macros=[("PNG_SETJMP_NOT_SUPPORTED", True)],
+                include_dirs=[png_inc],
+                library_dirs=[png_lib],
+                libraries=["m", "png"],
+                depends=["yt/utilities/_amr_utils/fp_utils.pxd"])
+    config.add_extension("PointsInVolume", 
+                ["yt/utilities/_amr_utils/PointsInVolume.pyx"],
+                libraries=["m"], depends=["yt/utilities/_amr_utils/fp_utils.pxd"])
+    config.add_extension("QuadTree", 
+                ["yt/utilities/_amr_utils/QuadTree.pyx"],
+                libraries=["m"], depends=["yt/utilities/_amr_utils/fp_utils.pxd"])
+    config.add_extension("RayIntegrators", 
+                ["yt/utilities/_amr_utils/RayIntegrators.pyx"],
+                libraries=["m"], depends=["yt/utilities/_amr_utils/fp_utils.pxd"])
+    config.add_extension("VolumeIntegrator", 
+               ["yt/utilities/_amr_utils/VolumeIntegrator.pyx",
+                "yt/utilities/_amr_utils/FixedInterpolator.c",
+                "yt/utilities/_amr_utils/kdtree.c"] +
+                 glob.glob("yt/utilities/_amr_utils/healpix_*.c"), 
+               include_dirs=["yt/utilities/_amr_utils/"],
+               libraries=["m"], 
+               depends = ["yt/utilities/_amr_utils/VolumeIntegrator.pyx",
+                          "yt/utilities/_amr_utils/fp_utils.pxd",
+                          "yt/utilities/_amr_utils/healpix_interface.pxd",
+                          "yt/utilities/_amr_utils/endian_swap.h",
+                          "yt/utilities/_amr_utils/FixedInterpolator.h",
+                          "yt/utilities/_amr_utils/healpix_vectors.h",
+                          "yt/utilities/_amr_utils/kdtree.h",
+                          "yt/utilities/_amr_utils/healpix_ang2pix_nest.c",
+                          "yt/utilities/_amr_utils/healpix_mk_pix2xy.c",
+                          "yt/utilities/_amr_utils/healpix_mk_xy2pix.c",
+                          "yt/utilities/_amr_utils/healpix_pix2ang_nest.c",
+                          "yt/utilities/_amr_utils/healpix_pix2vec_nest.c",
+                          "yt/utilities/_amr_utils/healpix_vec2pix_nest.c"]
+          )
+    config.make_config_py() # installs __config__.py
+    return config


diff -r 619ccb84cbff25e5e84ef0f273e13b14c663e63a -r 7255b565094cff2311b63492264fc1f5a8f71d0d yt/utilities/amr_utils.py
--- /dev/null
+++ b/yt/utilities/amr_utils.py
@@ -0,0 +1,39 @@
+"""
+Compatibility module
+
+Author: Matthew Turk <matthewturk@gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from ._amr_utils.CICDeposit import *
+from ._amr_utils.ContourFinding import *
+from ._amr_utils.DepthFirstOctree import *
+from ._amr_utils.fortran_reader import *
+from ._amr_utils.freetype_writer import *
+from ._amr_utils.geometry_utils import *
+from ._amr_utils.Interpolators import *
+from ._amr_utils.misc_utilities import *
+from ._amr_utils.Octree import *
+from ._amr_utils.png_writer import *
+from ._amr_utils.PointsInVolume import *
+from ._amr_utils.QuadTree import *
+from ._amr_utils.RayIntegrators import *
+from ._amr_utils.VolumeIntegrator import *


diff -r 619ccb84cbff25e5e84ef0f273e13b14c663e63a -r 7255b565094cff2311b63492264fc1f5a8f71d0d yt/utilities/setup.py
--- a/yt/utilities/setup.py
+++ b/yt/utilities/setup.py
@@ -2,101 +2,6 @@
 import setuptools
 import os, sys, os.path, glob
 
-def check_for_png():
-    # First up: HDF5_DIR in environment
-    if "PNG_DIR" in os.environ:
-        png_dir = os.environ["PNG_DIR"]
-        png_inc = os.path.join(png_dir, "include")
-        png_lib = os.path.join(png_dir, "lib")
-        print "PNG_LOCATION: PNG_DIR: %s, %s" % (png_inc, png_lib)
-        return (png_inc, png_lib)
-    # Next up, we try png.cfg
-    elif os.path.exists("png.cfg"):
-        png_dir = open("png.cfg").read().strip()
-        png_inc = os.path.join(png_dir, "include")
-        png_lib = os.path.join(png_dir, "lib")
-        print "PNG_LOCATION: png.cfg: %s, %s" % (png_inc, png_lib)
-        return (png_inc, png_lib)
-    # Now we see if ctypes can help us:
-    try:
-        import ctypes.util
-        png_libfile = ctypes.util.find_library("png")
-        if png_libfile is not None and os.path.isfile(png_libfile):
-            # Now we've gotten a library, but we'll need to figure out the
-            # includes if this is going to work.  It feels like there is a
-            # better way to pull off two directory names.
-            png_dir = os.path.dirname(os.path.dirname(png_libfile))
-            if os.path.isdir(os.path.join(png_dir, "include")) and \
-               os.path.isfile(os.path.join(png_dir, "include", "png.h")):
-                png_inc = os.path.join(png_dir, "include")
-                png_lib = os.path.join(png_dir, "lib")
-                print "PNG_LOCATION: png found in: %s, %s" % (png_inc, png_lib)
-                return png_inc, png_lib
-    except ImportError:
-        pass
-    # X11 is where it's located by default on OSX, although I am slightly
-    # reluctant to link against that one.
-    for png_dir in ["/usr/", "/usr/local/", "/usr/X11/"]:
-        if os.path.isfile(os.path.join(png_dir, "include", "png.h")):
-            if os.path.isdir(os.path.join(png_dir, "include")) and \
-               os.path.isfile(os.path.join(png_dir, "include", "png.h")):
-                png_inc = os.path.join(png_dir, "include")
-                png_lib = os.path.join(png_dir, "lib")
-                print "PNG_LOCATION: png found in: %s, %s" % (png_inc, png_lib)
-                return png_inc, png_lib
-    print "Reading png location from png.cfg failed."
-    print "Please place the base directory of your png install in png.cfg and restart."
-    print "(ex: \"echo '/usr/local/' > png.cfg\" )"
-    sys.exit(1)
-
-def check_for_freetype():
-    # First up: environment
-    if "FTYPE_DIR" in os.environ:
-        freetype_dir = os.environ["FTYPE_DIR"]
-        freetype_inc = os.path.join(freetype_dir, "include")
-        freetype_lib = os.path.join(freetype_dir, "lib")
-        print "FTYPE_LOCATION: FTYPE_DIR: %s, %s" % (freetype_inc, freetype_lib)
-        return (freetype_inc, freetype_lib)
-    # Next up, we try freetype.cfg
-    elif os.path.exists("freetype.cfg"):
-        freetype_dir = open("freetype.cfg").read().strip()
-        freetype_inc = os.path.join(freetype_dir, "include")
-        freetype_lib = os.path.join(freetype_dir, "lib")
-        print "FTYPE_LOCATION: freetype.cfg: %s, %s" % (freetype_inc, freetype_lib)
-        return (freetype_inc, freetype_lib)
-    # Now we see if ctypes can help us:
-    try:
-        import ctypes.util
-        freetype_libfile = ctypes.util.find_library("freetype")
-        if freetype_libfile is not None and os.path.isfile(freetype_libfile):
-            # Now we've gotten a library, but we'll need to figure out the
-            # includes if this is going to work.  It feels like there is a
-            # better way to pull off two directory names.
-            freetype_dir = os.path.dirname(os.path.dirname(freetype_libfile))
-            if os.path.isdir(os.path.join(freetype_dir, "include")) and \
-               os.path.isfile(os.path.join(freetype_dir, "include", "ft2build.h")):
-                freetype_inc = os.path.join(freetype_dir, "include")
-                freetype_lib = os.path.join(freetype_dir, "lib")
-                print "FTYPE_LOCATION: freetype found in: %s, %s" % (freetype_inc, freetype_lib)
-                return freetype_inc, freetype_lib
-    except ImportError:
-        pass
-    # X11 is where it's located by default on OSX, although I am slightly
-    # reluctant to link against that one.
-    for freetype_dir in ["/usr/", "/usr/local/", "/usr/X11/"]:
-        if os.path.isfile(os.path.join(freetype_dir, "include", "ft2build.h")):
-            if os.path.isdir(os.path.join(freetype_dir, "include")) and \
-               os.path.isfile(os.path.join(freetype_dir, "include", "ft2build.h")):
-                freetype_inc = os.path.join(freetype_dir, "include")
-                freetype_lib = os.path.join(freetype_dir, "lib")
-                print "FTYPE_LOCATION: freetype found in: %s, %s" % (freetype_inc, freetype_lib)
-                return freetype_inc, freetype_lib
-    print "Reading freetype location from freetype.cfg failed."
-    print "Please place the base directory of your freetype install in freetype.cfg and restart."
-    print "(ex: \"echo '/usr/local/' > freetype.cfg\" )"
-    print "You can locate this by looking for the file ft2build.h"
-    sys.exit(1)
-
 def check_for_hdf5():
     # First up: HDF5_DIR in environment
     if "HDF5_DIR" in os.environ:
@@ -137,12 +42,6 @@
 def configuration(parent_package='',top_path=None):
     from numpy.distutils.misc_util import Configuration
     config = Configuration('utilities',parent_package,top_path)
-    png_inc, png_lib = check_for_png()
-    freetype_inc, freetype_lib = check_for_freetype()
-    # Because setjmp.h is included by lots of things, and because libpng hasn't
-    # always properly checked its header files (see
-    # https://bugzilla.redhat.com/show_bug.cgi?id=494579 ) we simply disable
-    # support for setjmp.
     config.add_subpackage("amr_kdtree")
     config.add_subpackage("answer_testing")
     config.add_subpackage("delaunay") # From SciPy, written by Robert Kern
@@ -150,6 +49,7 @@
     config.add_data_files(('kdtree', ['kdtree/fKDpy.so',]))
     config.add_subpackage("spatial")
     config.add_subpackage("parallel_tools")
+    config.add_subpackage("_amr_utils")
     config.add_extension("data_point_utilities",
                 "yt/utilities/data_point_utilities.c", libraries=["m"])
     hdf5_inc, hdf5_lib = check_for_hdf5()
@@ -159,25 +59,6 @@
                          define_macros=[("H5_USE_16_API",True)],
                          libraries=["m","hdf5"],
                          library_dirs=library_dirs, include_dirs=include_dirs)
-    config.add_extension("amr_utils", 
-        ["yt/utilities/amr_utils.pyx",
-         "yt/utilities/_amr_utils/FixedInterpolator.c",
-         "yt/utilities/_amr_utils/kdtree.c",
-         "yt/utilities/_amr_utils/union_find.c"] +
-         glob.glob("yt/utilities/_amr_utils/healpix_*.c"), 
-        define_macros=[("PNG_SETJMP_NOT_SUPPORTED", True)],
-        include_dirs=["yt/utilities/_amr_utils/", png_inc,
-                      freetype_inc, os.path.join(freetype_inc, "freetype2")],
-        library_dirs=[png_lib, freetype_lib],
-        libraries=["m", "png", "freetype"],
-        depends=glob.glob("yt/utilities/_amr_utils/*.pyx") +
-                glob.glob("yt/utilities/_amr_utils/*.h") +
-                glob.glob("yt/utilities/_amr_utils/*.c"),
-        )
-    #config.add_extension("voropp",
-    #    ["yt/utilities/voropp.pyx"],
-    #    language="c++",
-    #    include_dirs=["yt/utilities/voro++"])
     config.add_extension("libconfig_wrapper", 
         ["yt/utilities/libconfig_wrapper.pyx"] +
          glob.glob("yt/utilities/_libconfig/*.c"), 



https://bitbucket.org/yt_analysis/yt/changeset/a6eb89e1ea3d/
changeset:   a6eb89e1ea3d
branch:      geometry_handling
user:        MatthewTurk
date:        2011-11-12 18:20:32
summary:     Fixing typo in the dtype: int32_t => int32 inside the string for np.zeros.
affected #:  1 file

diff -r 7255b565094cff2311b63492264fc1f5a8f71d0d -r a6eb89e1ea3db0d70cc9640d6db07e3534c2b079 yt/utilities/_amr_utils/geometry_utils.pyx
--- a/yt/utilities/_amr_utils/geometry_utils.pyx
+++ b/yt/utilities/_amr_utils/geometry_utils.pyx
@@ -61,7 +61,7 @@
     cdef int i, ax
     cdef int i1, i2
     cdef int ng = left_edges.shape[0]
-    cdef np.ndarray[np.int32_t, ndim=1] gridi = np.zeros(ng, dtype='int32_t')
+    cdef np.ndarray[np.int32_t, ndim=1] gridi = np.zeros(ng, dtype='int32')
     cdef np.float64_t vs[3], t, p0[3], p1[3], v[3]
     for i in range(3):
         p0[i] = dobj.start_point[i]



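The typo fixed above matters because NumPy's dtype strings use the
Python-level names ('int32'), not the Cython/C type names ('int32_t').  A
two-line check (the exact error message may vary between NumPy versions):

    import numpy as np

    np.zeros(4, dtype='int32')          # fine: 'int32' is a valid dtype string
    try:
        np.zeros(4, dtype='int32_t')    # the C-level name is not
    except TypeError as err:
        print(err)                      # e.g. "data type 'int32_t' not understood"
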
https://bitbucket.org/yt_analysis/yt/changeset/ebfc02c87997/
changeset:   ebfc02c87997
branch:      geometry_handling
user:        MatthewTurk
date:        2011-11-12 18:25:57
summary:     Adding the appropriate .c files to .hgignore
affected #:  1 file

diff -r a6eb89e1ea3db0d70cc9640d6db07e3534c2b079 -r ebfc02c8799705dc3bbf03502460500ebc0bab31 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -8,6 +8,22 @@
 yt/utilities/amr_utils.c
 yt/utilities/kdtree/forthonf2c.h
 yt/utilities/libconfig_wrapper.c
+yt/utilities/_amr_utils/CICDeposit.c
+yt/utilities/_amr_utils/ContourFinding.c
+yt/utilities/_amr_utils/DepthFirstOctree.c
+yt/utilities/_amr_utils/FixedInterpolator.c
+yt/utilities/_amr_utils/fortran_reader.c
+yt/utilities/_amr_utils/freetype_writer.c
+yt/utilities/_amr_utils/geometry_utils.c
+yt/utilities/_amr_utils/Interpolators.c
+yt/utilities/_amr_utils/kdtree.c
+yt/utilities/_amr_utils/misc_utilities.c
+yt/utilities/_amr_utils/Octree.c
+yt/utilities/_amr_utils/png_writer.c
+yt/utilities/_amr_utils/PointsInVolume.c
+yt/utilities/_amr_utils/QuadTree.c
+yt/utilities/_amr_utils/RayIntegrators.c
+yt/utilities/_amr_utils/VolumeIntegrator.c
 syntax: glob
 *.pyc
 .*.swp



https://bitbucket.org/yt_analysis/yt/changeset/1c52d8c13177/
changeset:   1c52d8c13177
branch:      yt
user:        MatthewTurk
date:        2011-11-14 00:49:35
summary:     First pass at reimplementing light source
affected #:  2 files

diff -r e9d4dba7c151996557b7c77a9d805d9ee863b365 -r 1c52d8c131772938de81e351f6bc0ffcfdc16b50 yt/utilities/_amr_utils/VolumeIntegrator.pyx
--- a/yt/utilities/_amr_utils/VolumeIntegrator.pyx
+++ b/yt/utilities/_amr_utils/VolumeIntegrator.pyx
@@ -311,6 +311,9 @@
     cdef int n_fields
     cdef int n_field_tables
     cdef public int ns
+    cdef int grad
+    cdef np.float64_t light_source_v[3]
+    cdef np.float64_t light_source_c[3]
 
     # These are the field tables and their affiliated storage.
     # We have one field_id for every table.  Note that a single field can
@@ -365,6 +368,11 @@
         for i in range(6):
             self.field_table_ids[i] = tf_obj.field_table_ids[i]
             #print "Channel", i, "corresponds to", self.field_table_ids[i]
+
+        self.grad = tf_obj.grad_field
+        for i in range(3):
+            self.light_source_v[i] = tf_obj.light_source_v[i]
+            self.light_source_c[i] = tf_obj.light_source_c[i]
             
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -391,6 +399,12 @@
         for i in range(6):
             trgba[i] = istorage[self.field_table_ids[i]]
             #print i, trgba[i],
+        if self.grad != -1:
+            dot_prod = 0.0
+            for i in range(3):
+                dot_prod += grad[i] * self.light_source_v[i]
+            for i in range(3):
+                trgba[i] += dot_prod * self.light_source_c[i]
         #print
         # A few words on opacity.  We're going to be integrating equation 1.23
         # from Rybicki & Lightman.  dI_\nu / ds = -\alpha_\nu I_\nu + j_\nu
@@ -750,6 +764,9 @@
         for i in range(self.n_fields):
             slopes[i] = offset_interpolate(self.dims, dp,
                             self.data[i] + offset)
+            if tf.grad == i:
+                eval_gradient(self.dims, dp, self.data[i] + offset,
+                              grad)
         for i in range(3):
             dp[i] += ds[i] * tf.ns
         cdef np.float64_t temp


diff -r e9d4dba7c151996557b7c77a9d805d9ee863b365 -r 1c52d8c131772938de81e351f6bc0ffcfdc16b50 yt/visualization/volume_rendering/transfer_functions.py
--- a/yt/visualization/volume_rendering/transfer_functions.py
+++ b/yt/visualization/volume_rendering/transfer_functions.py
@@ -61,6 +61,8 @@
         self.x_bounds = x_bounds
         self.x = na.linspace(x_bounds[0], x_bounds[1], nbins).astype('float64')
         self.y = na.zeros(nbins, dtype='float64')
+        self.grad_field = -1
+        self.light_source_v = self.light_source_c = na.zeros(3, 'float64')
 
     def add_gaussian(self, location, width, height):
         r"""Add a Gaussian distribution to the transfer function.
@@ -239,6 +241,8 @@
         self.weight_field_ids = [-1] * 6 # This correlates 
         self.field_table_ids = [0] * 6
         self.weight_table_ids = [-1] * 6
+        self.grad_field = -1
+        self.light_source_v = self.light_source_c = na.zeros(3, 'float64')
 
     def add_field_table(self, table, field_id, weight_field_id = -1,
                         weight_table_id = -1):



https://bitbucket.org/yt_analysis/yt/changeset/2f33ba15d6bf/
changeset:   2f33ba15d6bf
branch:      yt
user:        MatthewTurk
date:        2011-11-14 01:30:08
summary:     Fixing the light implementation
affected #:  1 file

diff -r 1c52d8c131772938de81e351f6bc0ffcfdc16b50 -r 2f33ba15d6bfc0ea01715c4abc8b67cedf50c34a yt/utilities/_amr_utils/VolumeIntegrator.pyx
--- a/yt/utilities/_amr_utils/VolumeIntegrator.pyx
+++ b/yt/utilities/_amr_utils/VolumeIntegrator.pyx
@@ -376,10 +376,11 @@
             
     @cython.boundscheck(False)
     @cython.wraparound(False)
+    @cython.cdivision(True)
     cdef void eval_transfer(self, np.float64_t dt, np.float64_t *dvs,
                                   np.float64_t *rgba, np.float64_t *grad):
         cdef int i, fid, use
-        cdef np.float64_t ta, tf, istorage[6], trgba[6], dot_prod
+        cdef np.float64_t ta, tf, istorage[6], trgba[6], dot_prod, normalize
         # NOTE: We now disable this.  I have left it to ease the process of
         # potentially, one day, re-including it.
         #use = 0
@@ -403,8 +404,9 @@
             dot_prod = 0.0
             for i in range(3):
                 dot_prod += grad[i] * self.light_source_v[i]
+            if dot_prod < 0: dot_prod = 0.0
             for i in range(3):
-                trgba[i] += dot_prod * self.light_source_c[i]
+                trgba[i] *= dot_prod * self.light_source_c[i]
         #print
         # A few words on opacity.  We're going to be integrating equation 1.23
         # from Rybicki & Lightman.  dI_\nu / ds = -\alpha_\nu I_\nu + j_\nu



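Taken together, the two light-source changesets above add a Lambert-style
shading term inside eval_transfer: the emitted color channels are scaled by
the clamped dot product of the local field gradient with the light direction,
times a per-channel light color.  A small sketch of that step (apply_light is
a hypothetical helper; the real code works in place on the C arrays
trgba/grad):

    import numpy as np

    def apply_light(trgba, grad, light_v, light_c):
        # Scale the RGB part of trgba by max(grad . light_v, 0) * light_c.
        trgba = np.array(trgba, dtype='float64')
        dot_prod = max(np.dot(grad, light_v), 0.0)
        trgba[:3] *= dot_prod * light_c
        return trgba

    grad = np.array([0.0, 0.0, 1.0])       # local gradient, acting as a normal
    light_v = np.array([0.0, 0.0, 1.0])    # light direction
    light_c = np.array([1.0, 0.8, 0.6])    # light color
    print(apply_light([0.2, 0.2, 0.2, 1.0, 1.0, 1.0], grad, light_v, light_c))
    # [ 0.2   0.16  0.12  1.    1.    1.  ]
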
https://bitbucket.org/yt_analysis/yt/changeset/475484309e0a/
changeset:   475484309e0a
branch:      geometry_handling
user:        MatthewTurk
date:        2011-11-14 01:53:06
summary:     Merging the light source stuff in
affected #:  3 files

diff -r ebfc02c8799705dc3bbf03502460500ebc0bab31 -r 475484309e0a48edc075d8a20016dce1d80bb2b9 yt/utilities/_amr_utils/VolumeIntegrator.pyx
--- a/yt/utilities/_amr_utils/VolumeIntegrator.pyx
+++ b/yt/utilities/_amr_utils/VolumeIntegrator.pyx
@@ -287,6 +287,9 @@
     cdef int n_fields
     cdef int n_field_tables
     cdef public int ns
+    cdef int grad
+    cdef np.float64_t light_source_v[3]
+    cdef np.float64_t light_source_c[3]
 
     # These are the field tables and their affiliated storage.
     # We have one field_id for every table.  Note that a single field can
@@ -341,13 +344,19 @@
         for i in range(6):
             self.field_table_ids[i] = tf_obj.field_table_ids[i]
             #print "Channel", i, "corresponds to", self.field_table_ids[i]
+
+        self.grad = tf_obj.grad_field
+        for i in range(3):
+            self.light_source_v[i] = tf_obj.light_source_v[i]
+            self.light_source_c[i] = tf_obj.light_source_c[i]
             
     @cython.boundscheck(False)
     @cython.wraparound(False)
+    @cython.cdivision(True)
     cdef void eval_transfer(self, np.float64_t dt, np.float64_t *dvs,
                                   np.float64_t *rgba, np.float64_t *grad):
         cdef int i, fid, use
-        cdef np.float64_t ta, tf, istorage[6], trgba[6], dot_prod
+        cdef np.float64_t ta, tf, istorage[6], trgba[6], dot_prod, normalize
         # NOTE: We now disable this.  I have left it to ease the process of
         # potentially, one day, re-including it.
         #use = 0
@@ -367,6 +376,13 @@
         for i in range(6):
             trgba[i] = istorage[self.field_table_ids[i]]
             #print i, trgba[i],
+        if self.grad != -1:
+            dot_prod = 0.0
+            for i in range(3):
+                dot_prod += grad[i] * self.light_source_v[i]
+            if dot_prod < 0: dot_prod = 0.0
+            for i in range(3):
+                trgba[i] *= dot_prod * self.light_source_c[i]
         #print
         # A few words on opacity.  We're going to be integrating equation 1.23
         # from Rybicki & Lightman.  dI_\nu / ds = -\alpha_\nu I_\nu + j_\nu
@@ -726,6 +742,9 @@
         for i in range(self.n_fields):
             slopes[i] = offset_interpolate(self.dims, dp,
                             self.data[i] + offset)
+            if tf.grad == i:
+                eval_gradient(self.dims, dp, self.data[i] + offset,
+                              grad)
         for i in range(3):
             dp[i] += ds[i] * tf.ns
         cdef np.float64_t temp


diff -r ebfc02c8799705dc3bbf03502460500ebc0bab31 -r 475484309e0a48edc075d8a20016dce1d80bb2b9 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -573,7 +573,7 @@
         else:
             p = pc.add_slice(opts.field, opts.axis)
         from yt.gui.reason.pannable_map import PannableMapServer
-        mapper = PannableMapServer(p.field_data, opts.field)
+        mapper = PannableMapServer(p.data, opts.field)
         import yt.utilities.bottle as bottle
         bottle.debug(True)
         if opts.host is not None:


diff -r ebfc02c8799705dc3bbf03502460500ebc0bab31 -r 475484309e0a48edc075d8a20016dce1d80bb2b9 yt/visualization/volume_rendering/transfer_functions.py
--- a/yt/visualization/volume_rendering/transfer_functions.py
+++ b/yt/visualization/volume_rendering/transfer_functions.py
@@ -61,6 +61,8 @@
         self.x_bounds = x_bounds
         self.x = na.linspace(x_bounds[0], x_bounds[1], nbins).astype('float64')
         self.y = na.zeros(nbins, dtype='float64')
+        self.grad_field = -1
+        self.light_source_v = self.light_source_c = na.zeros(3, 'float64')
 
     def add_gaussian(self, location, width, height):
         r"""Add a Gaussian distribution to the transfer function.
@@ -239,6 +241,8 @@
         self.weight_field_ids = [-1] * 6 # This correlates 
         self.field_table_ids = [0] * 6
         self.weight_table_ids = [-1] * 6
+        self.grad_field = -1
+        self.light_source_v = self.light_source_c = na.zeros(3, 'float64')
 
     def add_field_table(self, table, field_id, weight_field_id = -1,
                         weight_table_id = -1):
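
This merge also wires grad_field through to the sampler: when a field's index matches tf.grad, eval_gradient is called alongside offset_interpolate to fill grad for the lighting term (eval_gradient itself is not part of this diff). As a rough stand-in for the quantity it supplies, a central-difference gradient over a brick can be sketched in NumPy; the function name and the assumption that dds holds the cell widths are illustrative only:

    import numpy as na

    def brick_gradient(field, dds):
        # Central-difference gradient of a 3D brick, one component per axis,
        # roughly what eval_gradient evaluates at each sample point.
        gx, gy, gz = na.gradient(field, dds[0], dds[1], dds[2])
        return gx, gy, gz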



https://bitbucket.org/yt_analysis/yt/changeset/bfcf3519e081/
changeset:   bfcf3519e081
branch:      geometry_handling
user:        MatthewTurk
date:        2011-11-14 06:22:38
summary:     First, poor attempt at refactoring volume rendering
affected #:  2 files

diff -r 475484309e0a48edc075d8a20016dce1d80bb2b9 -r bfcf3519e081e024b29b5b1806e6dbc6fcbd7805 yt/utilities/_amr_utils/VolumeIntegrator.pyx
--- a/yt/utilities/_amr_utils/VolumeIntegrator.pyx
+++ b/yt/utilities/_amr_utils/VolumeIntegrator.pyx
@@ -30,6 +30,7 @@
 cimport healpix_interface
 from stdlib cimport malloc, free, abs
 from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip
+from ray_handling cimport integrate_ray, ray_sampler
 
 cdef extern from "math.h":
     double exp(double x)
@@ -481,6 +482,13 @@
 
 cdef struct AdaptiveRayPacket
 
+cdef class PartitionedGrid
+
+cdef struct VolumeRendererData:
+    np.float64_t *rgba
+    TransferFunctionProxy tf
+    PartitionedGrid pg
+
 cdef class PartitionedGrid:
     cdef public object my_data
     cdef public object LeftEdge
@@ -596,189 +604,21 @@
                     if temp > extrema[3]: extrema[3] = temp
         #print extrema[0], extrema[1], extrema[2], extrema[3]
 
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    cdef int integrate_ray(self, np.float64_t v_pos[3],
-                                 np.float64_t v_dir[3],
-                                 np.float64_t rgba[4],
-                                 TransferFunctionProxy tf,
-                                 np.float64_t *return_t = NULL,
-                                 np.float64_t enter_t = -1.0):
-        cdef int cur_ind[3], step[3], x, y, i, n, flat_ind, hit, direction
-        cdef np.float64_t intersect_t = 1.0
-        cdef np.float64_t iv_dir[3]
-        cdef np.float64_t intersect[3], tmax[3], tdelta[3]
-        cdef np.float64_t dist, alpha, dt, exit_t
-        cdef np.float64_t tr, tl, temp_x, temp_y, dv
-        for i in range(3):
-            if (v_dir[i] < 0):
-                step[i] = -1
-            elif (v_dir[i] == 0):
-                step[i] = 1
-                tmax[i] = 1e60
-                iv_dir[i] = 1e60
-                tdelta[i] = 1e-60
-                continue
-            else:
-                step[i] = 1
-            x = (i+1) % 3
-            y = (i+2) % 3
-            iv_dir[i] = 1.0/v_dir[i]
-            tl = (self.left_edge[i] - v_pos[i])*iv_dir[i]
-            temp_x = (v_pos[x] + tl*v_dir[x])
-            temp_y = (v_pos[y] + tl*v_dir[y])
-            if self.left_edge[x] <= temp_x and temp_x <= self.right_edge[x] and \
-               self.left_edge[y] <= temp_y and temp_y <= self.right_edge[y] and \
-               0.0 <= tl and tl < intersect_t:
-                direction = i
-                intersect_t = tl
-            tr = (self.right_edge[i] - v_pos[i])*iv_dir[i]
-            temp_x = (v_pos[x] + tr*v_dir[x])
-            temp_y = (v_pos[y] + tr*v_dir[y])
-            if self.left_edge[x] <= temp_x and temp_x <= self.right_edge[x] and \
-               self.left_edge[y] <= temp_y and temp_y <= self.right_edge[y] and \
-               0.0 <= tr and tr < intersect_t:
-                direction = i
-                intersect_t = tr
-        if self.left_edge[0] <= v_pos[0] and v_pos[0] <= self.right_edge[0] and \
-           self.left_edge[1] <= v_pos[1] and v_pos[1] <= self.right_edge[1] and \
-           self.left_edge[2] <= v_pos[2] and v_pos[2] <= self.right_edge[2]:
-            intersect_t = 0.0
-        if enter_t >= 0.0: intersect_t = enter_t
-        if not ((0.0 <= intersect_t) and (intersect_t < 1.0)): return 0
-        for i in range(3):
-            intersect[i] = v_pos[i] + intersect_t * v_dir[i]
-            cur_ind[i] = <int> floor((intersect[i] +
-                                      step[i]*1e-8*self.dds[i] -
-                                      self.left_edge[i])*self.idds[i])
-            tmax[i] = (((cur_ind[i]+step[i])*self.dds[i])+
-                        self.left_edge[i]-v_pos[i])*iv_dir[i]
-            # This deals with the asymmetry in having our indices refer to the
-            # left edge of a cell, but the right edge of the brick being one
-            # extra zone out.
-            if cur_ind[i] == self.dims[i] and step[i] < 0:
-                cur_ind[i] = self.dims[i] - 1
-            if cur_ind[i] < 0 or cur_ind[i] >= self.dims[i]: return 0
-            if step[i] > 0:
-                tmax[i] = (((cur_ind[i]+1)*self.dds[i])
-                            +self.left_edge[i]-v_pos[i])*iv_dir[i]
-            if step[i] < 0:
-                tmax[i] = (((cur_ind[i]+0)*self.dds[i])
-                            +self.left_edge[i]-v_pos[i])*iv_dir[i]
-            tdelta[i] = (self.dds[i]*iv_dir[i])
-            if tdelta[i] < 0: tdelta[i] *= -1
-        # We have to jumpstart our calculation
-        enter_t = intersect_t
-        hit = 0
-        while 1:
-            # dims here is one less than the dimensions of the data,
-            # but we are tracing on the grid, not on the data...
-            if (not (0 <= cur_ind[0] < self.dims[0])) or \
-               (not (0 <= cur_ind[1] < self.dims[1])) or \
-               (not (0 <= cur_ind[2] < self.dims[2])):
-                break
-            hit += 1
-            if tmax[0] < tmax[1]:
-                if tmax[0] < tmax[2]:
-                    exit_t = fmin(tmax[0], 1.0)
-                    self.sample_values(v_pos, v_dir, enter_t, exit_t, cur_ind,
-                                       rgba, tf)
-                    cur_ind[0] += step[0]
-                    enter_t = tmax[0]
-                    tmax[0] += tdelta[0]
-                else:
-                    exit_t = fmin(tmax[2], 1.0)
-                    self.sample_values(v_pos, v_dir, enter_t, exit_t, cur_ind,
-                                       rgba, tf)
-                    cur_ind[2] += step[2]
-                    enter_t = tmax[2]
-                    tmax[2] += tdelta[2]
-            else:
-                if tmax[1] < tmax[2]:
-                    exit_t = fmin(tmax[1], 1.0)
-                    self.sample_values(v_pos, v_dir, enter_t, exit_t, cur_ind,
-                                       rgba, tf)
-                    cur_ind[1] += step[1]
-                    enter_t = tmax[1]
-                    tmax[1] += tdelta[1]
-                else:
-                    exit_t = fmin(tmax[2], 1.0)
-                    self.sample_values(v_pos, v_dir, enter_t, exit_t, cur_ind,
-                                       rgba, tf)
-                    cur_ind[2] += step[2]
-                    enter_t = tmax[2]
-                    tmax[2] += tdelta[2]
-            if enter_t >= 1.0: break
-        if return_t != NULL: return_t[0] = exit_t
-        return hit
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    cdef void sample_values(self,
-                            np.float64_t v_pos[3],
-                            np.float64_t v_dir[3],
-                            np.float64_t enter_t,
-                            np.float64_t exit_t,
-                            int ci[3],
-                            np.float64_t *rgba,
-                            TransferFunctionProxy tf):
-        cdef np.float64_t cp[3], dp[3], pos[3], dt, t, dv
-        cdef np.float64_t grad[3], ds[3]
-        cdef np.float64_t local_dds[3], cell_left[3]
-        grad[0] = grad[1] = grad[2] = 0.0
-        cdef int dti, i
-        cdef kdtree_utils.kdres *ballq = NULL
-        dt = (exit_t - enter_t) / tf.ns # 4 samples should be dt=0.25
-        cdef int offset = ci[0] * (self.dims[1] + 1) * (self.dims[2] + 1) \
-                        + ci[1] * (self.dims[2] + 1) + ci[2]
-        # The initial and final values can be linearly interpolated between; so
-        # we just have to calculate our initial and final values.
-        cdef np.float64_t slopes[6]
-        for i in range(3):
-            dp[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
-            dp[i] -= ci[i] * self.dds[i] + self.left_edge[i]
-            dp[i] *= self.idds[i]
-            ds[i] = v_dir[i] * self.idds[i] * dt
-        for i in range(self.n_fields):
-            slopes[i] = offset_interpolate(self.dims, dp,
-                            self.data[i] + offset)
-            if tf.grad == i:
-                eval_gradient(self.dims, dp, self.data[i] + offset,
-                              grad)
-        for i in range(3):
-            dp[i] += ds[i] * tf.ns
-        cdef np.float64_t temp
-        for i in range(self.n_fields):
-            temp = slopes[i]
-            slopes[i] -= offset_interpolate(self.dims, dp,
-                             self.data[i] + offset)
-            slopes[i] *= -1.0/tf.ns
-            self.dvs[i] = temp
-        if self.star_list != NULL:
-            for i in range(3):
-                cell_left[i] = ci[i] * self.dds[i] + self.left_edge[i]
-                # this gets us dp as the current first sample position
-                pos[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
-                dp[i] -= tf.ns * ds[i]
-                local_dds[i] = v_dir[i] * dt
-            ballq = kdtree_utils.kd_nearest_range3(
-                self.star_list, cell_left[0] + self.dds[0]*0.5,
-                                cell_left[1] + self.dds[1]*0.5,
-                                cell_left[2] + self.dds[2]*0.5,
-                                self.star_er + 0.9*self.dds[0])
-                                            # ~0.866 + a bit
-        for dti in range(tf.ns): 
-            #if (dv < tf.x_bounds[0]) or (dv > tf.x_bounds[1]):
-            #    continue
-            if self.star_list != NULL:
-                self.add_stars(ballq, dt, pos, rgba)
-                for i in range(3):
-                    dp[i] += ds[i]
-                    pos[i] += local_dds[i]
-            tf.eval_transfer(dt, self.dvs, rgba, grad)
-            for i in range(self.n_fields):
-                self.dvs[i] += slopes[i]
-        if ballq != NULL: kdtree_utils.kd_res_free(ballq)
+    cdef int integrate_ray(np.float64_t left_edge[3],
+                           np.float64_t right_edge[3],
+                           np.float64_t dds[3],
+                           np.float64_t idds[3],
+                           int dims[3],
+                           np.float64_t v_pos[3],
+                           np.float64_t v_dir[3],
+                           np.float64_t rgba[4],
+                           TransferFunctionProxy tf,
+                           np.float64_t *return_t = NULL,
+                           np.float64_t enter_t = -1.0):
+        integrate_ray(self.left_edge, self.right_edge,
+                      self.dds, self.idds, self.dims, v_pos,
+                      v_dir, rgba, tf, return_t, enter_t,
+                      self.sample_value)
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -2006,3 +1846,72 @@
 # From Enzo:
 #   dOmega = 4 pi r^2/Nrays
 #   if (dOmega > RaysPerCell * dx^2) then split
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+cdef void pg_sample_values(np.float64_t v_pos[3],
+                        np.float64_t v_dir[3],
+                        np.float64_t enter_t,
+                        np.float64_t exit_t,
+                        int ci[3],
+                        void *rdata):
+    cdef VolumeRendererData *dd = <VolumeRendererData*> rdata
+    cdef PartitionedGrid self = dd.pg
+    cdef np.float64_t cp[3], dp[3], pos[3], dt, t, dv
+    cdef np.float64_t grad[3], ds[3]
+    cdef np.float64_t local_dds[3], cell_left[3]
+    grad[0] = grad[1] = grad[2] = 0.0
+    cdef int dti, i
+    cdef kdtree_utils.kdres *ballq = NULL
+    dt = (exit_t - enter_t) / tf.ns # 4 samples should be dt=0.25
+    cdef int offset = ci[0] * (self.dims[1] + 1) * (self.dims[2] + 1) \
+                    + ci[1] * (self.dims[2] + 1) + ci[2]
+    # The initial and final values can be linearly interpolated between; so
+    # we just have to calculate our initial and final values.
+    cdef np.float64_t slopes[6]
+    for i in range(3):
+        dp[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
+        dp[i] -= ci[i] * self.dds[i] + self.left_edge[i]
+        dp[i] *= self.idds[i]
+        ds[i] = v_dir[i] * self.idds[i] * dt
+    for i in range(self.n_fields):
+        slopes[i] = offset_interpolate(self.dims, dp,
+                        self.data[i] + offset)
+        if tf.grad == i:
+            eval_gradient(self.dims, dp, self.data[i] + offset,
+                          grad)
+    for i in range(3):
+        dp[i] += ds[i] * tf.ns
+    cdef np.float64_t temp
+    for i in range(self.n_fields):
+        temp = slopes[i]
+        slopes[i] -= offset_interpolate(self.dims, dp,
+                         self.data[i] + offset)
+        slopes[i] *= -1.0/tf.ns
+        self.dvs[i] = temp
+    if self.star_list != NULL:
+        for i in range(3):
+            cell_left[i] = ci[i] * self.dds[i] + self.left_edge[i]
+            # this gets us dp as the current first sample position
+            pos[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
+            dp[i] -= tf.ns * ds[i]
+            local_dds[i] = v_dir[i] * dt
+        ballq = kdtree_utils.kd_nearest_range3(
+            self.star_list, cell_left[0] + self.dds[0]*0.5,
+                            cell_left[1] + self.dds[1]*0.5,
+                            cell_left[2] + self.dds[2]*0.5,
+                            self.star_er + 0.9*self.dds[0])
+                                        # ~0.866 + a bit
+    for dti in range(tf.ns): 
+        #if (dv < tf.x_bounds[0]) or (dv > tf.x_bounds[1]):
+        #    continue
+        if self.star_list != NULL:
+            self.add_stars(ballq, dt, pos, rgba)
+            for i in range(3):
+                dp[i] += ds[i]
+                pos[i] += local_dds[i]
+        tf.eval_transfer(dt, self.dvs, rgba, grad)
+        for i in range(self.n_fields):
+            self.dvs[i] += slopes[i]
+    if ballq != NULL: kdtree_utils.kd_res_free(ballq)
+


diff -r 475484309e0a48edc075d8a20016dce1d80bb2b9 -r bfcf3519e081e024b29b5b1806e6dbc6fcbd7805 yt/utilities/_amr_utils/ray_handling.pxd
--- /dev/null
+++ b/yt/utilities/_amr_utils/ray_handling.pxd
@@ -0,0 +1,153 @@
+"""
+General purpose ray casting
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2009 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+
+cimport numpy as np
+from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip
+
+ctypedef void (*ray_sampler) (np.float64_t v_pos[3],
+                              np.float64_t v_dir[3],
+                              np.float64_t enter_t,
+                              np.float64_t exit_t,
+                              int ci[3],
+                              void *rdata)
+
+@cython.cdivision(True)
+@cython.boundscheck(False)
+@cython.wraparound(False)
+cdef int integrate_ray(np.float64_t left_edge[3],
+                       np.float64_t right_edge[3],
+                       np.float64_t dds[3],
+                       np.float64_t idds[3],
+                       int dims[3],
+                       np.float64_t v_pos[3],
+                       np.float64_t v_dir[3],
+                       np.float64_t *return_t,
+                       np.float64_t enter_t,
+                       void *rdata):
+    cdef int cur_ind[3], step[3], x, y, i, n, flat_ind, hit, direction
+    cdef np.float64_t intersect_t = 1.0
+    cdef np.float64_t iv_dir[3]
+    cdef np.float64_t intersect[3], tmax[3], tdelta[3]
+    cdef np.float64_t dist, alpha, dt, exit_t
+    cdef np.float64_t tr, tl, temp_x, temp_y, dv
+    for i in range(3):
+        if (v_dir[i] < 0):
+            step[i] = -1
+        elif (v_dir[i] == 0):
+            step[i] = 1
+            tmax[i] = 1e60
+            iv_dir[i] = 1e60
+            tdelta[i] = 1e-60
+            continue
+        else:
+            step[i] = 1
+        x = (i+1) % 3
+        y = (i+2) % 3
+        iv_dir[i] = 1.0/v_dir[i]
+        tl = (left_edge[i] - v_pos[i])*iv_dir[i]
+        temp_x = (v_pos[x] + tl*v_dir[x])
+        temp_y = (v_pos[y] + tl*v_dir[y])
+        if left_edge[x] <= temp_x and temp_x <= right_edge[x] and \
+           left_edge[y] <= temp_y and temp_y <= right_edge[y] and \
+           0.0 <= tl and tl < intersect_t:
+            direction = i
+            intersect_t = tl
+        tr = (right_edge[i] - v_pos[i])*iv_dir[i]
+        temp_x = (v_pos[x] + tr*v_dir[x])
+        temp_y = (v_pos[y] + tr*v_dir[y])
+        if left_edge[x] <= temp_x and temp_x <= right_edge[x] and \
+           left_edge[y] <= temp_y and temp_y <= right_edge[y] and \
+           0.0 <= tr and tr < intersect_t:
+            direction = i
+            intersect_t = tr
+    if left_edge[0] <= v_pos[0] and v_pos[0] <= right_edge[0] and \
+       left_edge[1] <= v_pos[1] and v_pos[1] <= right_edge[1] and \
+       left_edge[2] <= v_pos[2] and v_pos[2] <= right_edge[2]:
+        intersect_t = 0.0
+    if enter_t >= 0.0: intersect_t = enter_t
+    if not ((0.0 <= intersect_t) and (intersect_t < 1.0)): return 0
+    for i in range(3):
+        intersect[i] = v_pos[i] + intersect_t * v_dir[i]
+        cur_ind[i] = <int> floor((intersect[i] +
+                                  step[i]*1e-8*dds[i] -
+                                  left_edge[i])*idds[i])
+        tmax[i] = (((cur_ind[i]+step[i])*dds[i])+
+                    left_edge[i]-v_pos[i])*iv_dir[i]
+        # This deals with the asymmetry in having our indices refer to the
+        # left edge of a cell, but the right edge of the brick being one
+        # extra zone out.
+        if cur_ind[i] == dims[i] and step[i] < 0:
+            cur_ind[i] = dims[i] - 1
+        if cur_ind[i] < 0 or cur_ind[i] >= dims[i]: return 0
+        if step[i] > 0:
+            tmax[i] = (((cur_ind[i]+1)*dds[i])
+                        +left_edge[i]-v_pos[i])*iv_dir[i]
+        if step[i] < 0:
+            tmax[i] = (((cur_ind[i]+0)*dds[i])
+                        +left_edge[i]-v_pos[i])*iv_dir[i]
+        tdelta[i] = (dds[i]*iv_dir[i])
+        if tdelta[i] < 0: tdelta[i] *= -1
+    # We have to jumpstart our calculation
+    enter_t = intersect_t
+    hit = 0
+    while 1:
+        # dims here is one less than the dimensions of the data,
+        # but we are tracing on the grid, not on the data...
+        if (not (0 <= cur_ind[0] < dims[0])) or \
+           (not (0 <= cur_ind[1] < dims[1])) or \
+           (not (0 <= cur_ind[2] < dims[2])):
+            break
+        hit += 1
+        if tmax[0] < tmax[1]:
+            if tmax[0] < tmax[2]:
+                exit_t = fmin(tmax[0], 1.0)
+                sample_values(v_pos, v_dir, enter_t, exit_t, cur_ind, rdata)
+                cur_ind[0] += step[0]
+                enter_t = tmax[0]
+                tmax[0] += tdelta[0]
+            else:
+                exit_t = fmin(tmax[2], 1.0)
+                sample_values(v_pos, v_dir, enter_t, exit_t, cur_ind, rdata)
+                cur_ind[2] += step[2]
+                enter_t = tmax[2]
+                tmax[2] += tdelta[2]
+        else:
+            if tmax[1] < tmax[2]:
+                exit_t = fmin(tmax[1], 1.0)
+                sample_values(v_pos, v_dir, enter_t, exit_t, cur_ind, rdata)
+                cur_ind[1] += step[1]
+                enter_t = tmax[1]
+                tmax[1] += tdelta[1]
+            else:
+                exit_t = fmin(tmax[2], 1.0)
+                sample_values(v_pos, v_dir, enter_t, exit_t, cur_ind, rdata)
+                cur_ind[2] += step[2]
+                enter_t = tmax[2]
+                tmax[2] += tdelta[2]
+        if enter_t >= 1.0: break
+    if return_t != NULL: return_t[0] = exit_t
+    return hit
+
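
The shape of the refactoring: traversal is pulled out of PartitionedGrid into a free integrate_ray that takes the brick geometry, a ray_sampler function pointer, and an opaque void *rdata, so one traversal routine can drive different samplers; the VolumeRendererData struct above is the payload carried through rdata. The same interface can be sketched in plain Python. This is a deliberately simplified fixed-step march, not the cell-by-cell Amanatides & Woo stepping the .pxd implements, and every argument name is an assumption:

    import numpy as na

    def integrate_ray(left_edge, right_edge, dims, v_pos, v_dir,
                      sampler, rdata, n_steps=256):
        # left_edge, right_edge, v_pos, v_dir: float64 arrays of length 3;
        # dims: integer array of length 3; sampler: callable with the same
        # signature as ray_sampler, receiving rdata as its opaque payload.
        # Entry/exit of p(t) = v_pos + t * v_dir with the brick (slab test),
        # with t clipped to [0, 1] as in the Cython version.
        inv = na.empty(3, dtype='float64')
        inv.fill(1e60)
        nz = v_dir != 0.0
        inv[nz] = 1.0 / v_dir[nz]
        t0 = (left_edge - v_pos) * inv
        t1 = (right_edge - v_pos) * inv
        t_enter = max(na.minimum(t0, t1).max(), 0.0)
        t_exit = min(na.maximum(t0, t1).min(), 1.0)
        if t_enter >= t_exit:
            return 0                      # the ray misses the brick
        dds = (right_edge - left_edge) / dims
        ts = na.linspace(t_enter, t_exit, n_steps + 1)
        hits = 0
        for enter_t, exit_t in zip(ts[:-1], ts[1:]):
            mid = v_pos + 0.5 * (enter_t + exit_t) * v_dir
            ci = na.clip(((mid - left_edge) / dds).astype('int64'), 0, dims - 1)
            sampler(v_pos, v_dir, enter_t, exit_t, ci, rdata)
            hits += 1
        return hits

Here rdata can be any Python object carrying what the sampler needs (the rgba buffer, the transfer function proxy, the brick data), which is the role VolumeRendererData plays for pg_sample_values.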



https://bitbucket.org/yt_analysis/yt/changeset/5303e3e582c6/
changeset:   5303e3e582c6
branch:      yt
user:        MatthewTurk
date:        2011-12-07 16:21:14
summary:     Merging from geometry_handling.  This largely consists of moving items
inside amr_utils into individual Cython files and extensions, rather than a
single monolithic extension.  It also includes a few items that change how
ortho rays are found.
affected #:  17 files

diff -r 5220b10d9105af3cff9abe85bae5a983f26fc2a7 -r 5303e3e582c693ae04ca3075ca53bbe87e224262 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -9,6 +9,22 @@
 yt/utilities/kdtree/forthonf2c.h
 yt/utilities/libconfig_wrapper.c
 yt/utilities/spatial/ckdtree.c
+yt/utilities/_amr_utils/CICDeposit.c
+yt/utilities/_amr_utils/ContourFinding.c
+yt/utilities/_amr_utils/DepthFirstOctree.c
+yt/utilities/_amr_utils/FixedInterpolator.c
+yt/utilities/_amr_utils/fortran_reader.c
+yt/utilities/_amr_utils/freetype_writer.c
+yt/utilities/_amr_utils/geometry_utils.c
+yt/utilities/_amr_utils/Interpolators.c
+yt/utilities/_amr_utils/kdtree.c
+yt/utilities/_amr_utils/misc_utilities.c
+yt/utilities/_amr_utils/Octree.c
+yt/utilities/_amr_utils/png_writer.c
+yt/utilities/_amr_utils/PointsInVolume.c
+yt/utilities/_amr_utils/QuadTree.c
+yt/utilities/_amr_utils/RayIntegrators.c
+yt/utilities/_amr_utils/VolumeIntegrator.c
 syntax: glob
 *.pyc
 .*.swp


diff -r 5220b10d9105af3cff9abe85bae5a983f26fc2a7 -r 5303e3e582c693ae04ca3075ca53bbe87e224262 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -41,7 +41,7 @@
 from yt.utilities.amr_utils import find_grids_in_inclined_box, \
     grid_points_in_volume, planar_points_in_volume, VoxelTraversal, \
     QuadTree, get_box_grids_below_level, ghost_zone_interpolate, \
-    march_cubes_grid, march_cubes_grid_flux
+    march_cubes_grid, march_cubes_grid_flux, ortho_ray_grids, ray_grids
 from yt.utilities.data_point_utilities import CombineGrids, \
     DataCubeRefine, DataCubeReplace, FillRegion, FillBuffer
 from yt.utilities.definitions import axis_names, x_dict, y_dict
@@ -557,12 +557,10 @@
         return (self.px, self.py)
 
     def _get_list_of_grids(self):
-        # This bugs me, but we will give the tie to the LeftEdge
-        y = na.where( (self.px >=  self.pf.hierarchy.grid_left_edge[:,self.px_ax])
-                    & (self.px < self.pf.hierarchy.grid_right_edge[:,self.px_ax])
-                    & (self.py >=  self.pf.hierarchy.grid_left_edge[:,self.py_ax])
-                    & (self.py < self.pf.hierarchy.grid_right_edge[:,self.py_ax]))
-        self._grids = self.hierarchy.grids[y]
+        gi = ortho_ray_grids(self, 
+                self.hierarchy.grid_left_edge,
+                self.hierarchy.grid_right_edge)
+        self._grids = self.hierarchy.grids[gi]
 
     @restore_grid_state
     def _get_data_from_grid(self, grid, field):
@@ -633,31 +631,10 @@
         #self._refresh_data()
 
     def _get_list_of_grids(self):
-        # Get the value of the line at each LeftEdge and RightEdge
-        LE = self.pf.h.grid_left_edge
-        RE = self.pf.h.grid_right_edge
-        p = na.zeros(self.pf.h.num_grids, dtype='bool')
-        # Check left faces first
-        for i in range(3):
-            i1 = (i+1) % 3
-            i2 = (i+2) % 3
-            vs = self._get_line_at_coord(LE[:,i], i)
-            p = p | ( ( (LE[:,i1] <= vs[:,i1]) & (RE[:,i1] >= vs[:,i1]) ) \
-                    & ( (LE[:,i2] <= vs[:,i2]) & (RE[:,i2] >= vs[:,i2]) ) )
-            vs = self._get_line_at_coord(RE[:,i], i)
-            p = p | ( ( (LE[:,i1] <= vs[:,i1]) & (RE[:,i1] >= vs[:,i1]) ) \
-                    & ( (LE[:,i2] <= vs[:,i2]) & (RE[:,i2] >= vs[:,i2]) ) )
-        p = p | ( na.all( LE <= self.start_point, axis=1 ) 
-                & na.all( RE >= self.start_point, axis=1 ) )
-        p = p | ( na.all( LE <= self.end_point,   axis=1 ) 
-                & na.all( RE >= self.end_point,   axis=1 ) )
-        self._grids = self.hierarchy.grids[p]
-
-    def _get_line_at_coord(self, v, index):
-        # t*self.vec + self.start_point = self.end_point
-        t = (v - self.start_point[index])/self.vec[index]
-        t = t.reshape((t.shape[0],1))
-        return self.start_point + t*self.vec
+        gi = ray_grids(self,
+                self.hierarchy.grid_left_edge,
+                self.hierarchy.grid_right_edge)
+        self._grids = self.hierarchy.grids[gi]
 
     @restore_grid_state
     def _get_data_from_grid(self, grid, field):


diff -r 5220b10d9105af3cff9abe85bae5a983f26fc2a7 -r 5303e3e582c693ae04ca3075ca53bbe87e224262 yt/utilities/_amr_utils/CICDeposit.pyx
--- a/yt/utilities/_amr_utils/CICDeposit.pyx
+++ b/yt/utilities/_amr_utils/CICDeposit.pyx
@@ -23,6 +23,10 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+cimport numpy as np
+cimport cython
+from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip
+
 @cython.boundscheck(False)
 @cython.wraparound(False)
 def CICDeposit_3(np.ndarray[np.float64_t, ndim=1] posx,


diff -r 5220b10d9105af3cff9abe85bae5a983f26fc2a7 -r 5303e3e582c693ae04ca3075ca53bbe87e224262 yt/utilities/_amr_utils/Interpolators.pyx
--- a/yt/utilities/_amr_utils/Interpolators.pyx
+++ b/yt/utilities/_amr_utils/Interpolators.pyx
@@ -26,6 +26,7 @@
 import numpy as np
 cimport numpy as np
 cimport cython
+from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip
 
 @cython.cdivision(True)
 @cython.wraparound(False)


diff -r 5220b10d9105af3cff9abe85bae5a983f26fc2a7 -r 5303e3e582c693ae04ca3075ca53bbe87e224262 yt/utilities/_amr_utils/Octree.pyx
--- a/yt/utilities/_amr_utils/Octree.pyx
+++ b/yt/utilities/_amr_utils/Octree.pyx
@@ -30,6 +30,7 @@
 cimport numpy as cnp
 cimport cython
 
+from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip
 from stdlib cimport malloc, free, abs
 
 import sys, time


diff -r 5220b10d9105af3cff9abe85bae5a983f26fc2a7 -r 5303e3e582c693ae04ca3075ca53bbe87e224262 yt/utilities/_amr_utils/VolumeIntegrator.pyx
--- a/yt/utilities/_amr_utils/VolumeIntegrator.pyx
+++ b/yt/utilities/_amr_utils/VolumeIntegrator.pyx
@@ -29,31 +29,8 @@
 cimport kdtree_utils
 cimport healpix_interface
 from stdlib cimport malloc, free, abs
-
-cdef inline int imax(int i0, int i1):
-    if i0 > i1: return i0
-    return i1
-
-cdef inline np.float64_t fmax(np.float64_t f0, np.float64_t f1):
-    if f0 > f1: return f0
-    return f1
-
-cdef inline int imin(int i0, int i1):
-    if i0 < i1: return i0
-    return i1
-
-cdef inline np.float64_t fmin(np.float64_t f0, np.float64_t f1):
-    if f0 < f1: return f0
-    return f1
-
-cdef inline int iclip(int i, int a, int b):
-    if i < a: return a
-    if i > b: return b
-    return i
-
-cdef inline np.float64_t fclip(np.float64_t f,
-                      np.float64_t a, np.float64_t b):
-    return fmin(fmax(f, a), b)
+from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip
+from ray_handling cimport integrate_ray, ray_sampler
 
 cdef extern from "math.h":
     double exp(double x)
@@ -311,6 +288,9 @@
     cdef int n_fields
     cdef int n_field_tables
     cdef public int ns
+    cdef int grad
+    cdef np.float64_t light_source_v[3]
+    cdef np.float64_t light_source_c[3]
 
     # These are the field tables and their affiliated storage.
     # We have one field_id for every table.  Note that a single field can
@@ -365,13 +345,19 @@
         for i in range(6):
             self.field_table_ids[i] = tf_obj.field_table_ids[i]
             #print "Channel", i, "corresponds to", self.field_table_ids[i]
+
+        self.grad = tf_obj.grad_field
+        for i in range(3):
+            self.light_source_v[i] = tf_obj.light_source_v[i]
+            self.light_source_c[i] = tf_obj.light_source_c[i]
             
     @cython.boundscheck(False)
     @cython.wraparound(False)
+    @cython.cdivision(True)
     cdef void eval_transfer(self, np.float64_t dt, np.float64_t *dvs,
                                   np.float64_t *rgba, np.float64_t *grad):
         cdef int i, fid, use
-        cdef np.float64_t ta, tf, istorage[6], trgba[6], dot_prod
+        cdef np.float64_t ta, tf, istorage[6], trgba[6], dot_prod, normalize
         # NOTE: We now disable this.  I have left it to ease the process of
         # potentially, one day, re-including it.
         #use = 0
@@ -391,6 +377,13 @@
         for i in range(6):
             trgba[i] = istorage[self.field_table_ids[i]]
             #print i, trgba[i],
+        if self.grad != -1:
+            dot_prod = 0.0
+            for i in range(3):
+                dot_prod += grad[i] * self.light_source_v[i]
+            if dot_prod < 0: dot_prod = 0.0
+            for i in range(3):
+                trgba[i] *= dot_prod * self.light_source_c[i]
         #print
         # A few words on opacity.  We're going to be integrating equation 1.23
         # from Rybicki & Lightman.  dI_\nu / ds = -\alpha_\nu I_\nu + j_\nu
@@ -489,6 +482,13 @@
 
 cdef struct AdaptiveRayPacket
 
+cdef class PartitionedGrid
+
+cdef struct VolumeRendererData:
+    np.float64_t *rgba
+    TransferFunctionProxy tf
+    PartitionedGrid pg
+
 cdef class PartitionedGrid:
     cdef public object my_data
     cdef public object LeftEdge
@@ -604,186 +604,21 @@
                     if temp > extrema[3]: extrema[3] = temp
         #print extrema[0], extrema[1], extrema[2], extrema[3]
 
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    cdef int integrate_ray(self, np.float64_t v_pos[3],
-                                 np.float64_t v_dir[3],
-                                 np.float64_t rgba[4],
-                                 TransferFunctionProxy tf,
-                                 np.float64_t *return_t = NULL,
-                                 np.float64_t enter_t = -1.0):
-        cdef int cur_ind[3], step[3], x, y, i, n, flat_ind, hit, direction
-        cdef np.float64_t intersect_t = 1.0
-        cdef np.float64_t iv_dir[3]
-        cdef np.float64_t intersect[3], tmax[3], tdelta[3]
-        cdef np.float64_t dist, alpha, dt, exit_t
-        cdef np.float64_t tr, tl, temp_x, temp_y, dv
-        for i in range(3):
-            if (v_dir[i] < 0):
-                step[i] = -1
-            elif (v_dir[i] == 0):
-                step[i] = 1
-                tmax[i] = 1e60
-                iv_dir[i] = 1e60
-                tdelta[i] = 1e-60
-                continue
-            else:
-                step[i] = 1
-            x = (i+1) % 3
-            y = (i+2) % 3
-            iv_dir[i] = 1.0/v_dir[i]
-            tl = (self.left_edge[i] - v_pos[i])*iv_dir[i]
-            temp_x = (v_pos[x] + tl*v_dir[x])
-            temp_y = (v_pos[y] + tl*v_dir[y])
-            if self.left_edge[x] <= temp_x and temp_x <= self.right_edge[x] and \
-               self.left_edge[y] <= temp_y and temp_y <= self.right_edge[y] and \
-               0.0 <= tl and tl < intersect_t:
-                direction = i
-                intersect_t = tl
-            tr = (self.right_edge[i] - v_pos[i])*iv_dir[i]
-            temp_x = (v_pos[x] + tr*v_dir[x])
-            temp_y = (v_pos[y] + tr*v_dir[y])
-            if self.left_edge[x] <= temp_x and temp_x <= self.right_edge[x] and \
-               self.left_edge[y] <= temp_y and temp_y <= self.right_edge[y] and \
-               0.0 <= tr and tr < intersect_t:
-                direction = i
-                intersect_t = tr
-        if self.left_edge[0] <= v_pos[0] and v_pos[0] <= self.right_edge[0] and \
-           self.left_edge[1] <= v_pos[1] and v_pos[1] <= self.right_edge[1] and \
-           self.left_edge[2] <= v_pos[2] and v_pos[2] <= self.right_edge[2]:
-            intersect_t = 0.0
-        if enter_t >= 0.0: intersect_t = enter_t
-        if not ((0.0 <= intersect_t) and (intersect_t < 1.0)): return 0
-        for i in range(3):
-            intersect[i] = v_pos[i] + intersect_t * v_dir[i]
-            cur_ind[i] = <int> floor((intersect[i] +
-                                      step[i]*1e-8*self.dds[i] -
-                                      self.left_edge[i])*self.idds[i])
-            tmax[i] = (((cur_ind[i]+step[i])*self.dds[i])+
-                        self.left_edge[i]-v_pos[i])*iv_dir[i]
-            # This deals with the asymmetry in having our indices refer to the
-            # left edge of a cell, but the right edge of the brick being one
-            # extra zone out.
-            if cur_ind[i] == self.dims[i] and step[i] < 0:
-                cur_ind[i] = self.dims[i] - 1
-            if cur_ind[i] < 0 or cur_ind[i] >= self.dims[i]: return 0
-            if step[i] > 0:
-                tmax[i] = (((cur_ind[i]+1)*self.dds[i])
-                            +self.left_edge[i]-v_pos[i])*iv_dir[i]
-            if step[i] < 0:
-                tmax[i] = (((cur_ind[i]+0)*self.dds[i])
-                            +self.left_edge[i]-v_pos[i])*iv_dir[i]
-            tdelta[i] = (self.dds[i]*iv_dir[i])
-            if tdelta[i] < 0: tdelta[i] *= -1
-        # We have to jumpstart our calculation
-        enter_t = intersect_t
-        hit = 0
-        while 1:
-            # dims here is one less than the dimensions of the data,
-            # but we are tracing on the grid, not on the data...
-            if (not (0 <= cur_ind[0] < self.dims[0])) or \
-               (not (0 <= cur_ind[1] < self.dims[1])) or \
-               (not (0 <= cur_ind[2] < self.dims[2])):
-                break
-            hit += 1
-            if tmax[0] < tmax[1]:
-                if tmax[0] < tmax[2]:
-                    exit_t = fmin(tmax[0], 1.0)
-                    self.sample_values(v_pos, v_dir, enter_t, exit_t, cur_ind,
-                                       rgba, tf)
-                    cur_ind[0] += step[0]
-                    enter_t = tmax[0]
-                    tmax[0] += tdelta[0]
-                else:
-                    exit_t = fmin(tmax[2], 1.0)
-                    self.sample_values(v_pos, v_dir, enter_t, exit_t, cur_ind,
-                                       rgba, tf)
-                    cur_ind[2] += step[2]
-                    enter_t = tmax[2]
-                    tmax[2] += tdelta[2]
-            else:
-                if tmax[1] < tmax[2]:
-                    exit_t = fmin(tmax[1], 1.0)
-                    self.sample_values(v_pos, v_dir, enter_t, exit_t, cur_ind,
-                                       rgba, tf)
-                    cur_ind[1] += step[1]
-                    enter_t = tmax[1]
-                    tmax[1] += tdelta[1]
-                else:
-                    exit_t = fmin(tmax[2], 1.0)
-                    self.sample_values(v_pos, v_dir, enter_t, exit_t, cur_ind,
-                                       rgba, tf)
-                    cur_ind[2] += step[2]
-                    enter_t = tmax[2]
-                    tmax[2] += tdelta[2]
-            if enter_t >= 1.0: break
-        if return_t != NULL: return_t[0] = exit_t
-        return hit
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    cdef void sample_values(self,
-                            np.float64_t v_pos[3],
-                            np.float64_t v_dir[3],
-                            np.float64_t enter_t,
-                            np.float64_t exit_t,
-                            int ci[3],
-                            np.float64_t *rgba,
-                            TransferFunctionProxy tf):
-        cdef np.float64_t cp[3], dp[3], pos[3], dt, t, dv
-        cdef np.float64_t grad[3], ds[3]
-        cdef np.float64_t local_dds[3], cell_left[3]
-        grad[0] = grad[1] = grad[2] = 0.0
-        cdef int dti, i
-        cdef kdtree_utils.kdres *ballq = NULL
-        dt = (exit_t - enter_t) / tf.ns # 4 samples should be dt=0.25
-        cdef int offset = ci[0] * (self.dims[1] + 1) * (self.dims[2] + 1) \
-                        + ci[1] * (self.dims[2] + 1) + ci[2]
-        # The initial and final values can be linearly interpolated between; so
-        # we just have to calculate our initial and final values.
-        cdef np.float64_t slopes[6]
-        for i in range(3):
-            dp[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
-            dp[i] -= ci[i] * self.dds[i] + self.left_edge[i]
-            dp[i] *= self.idds[i]
-            ds[i] = v_dir[i] * self.idds[i] * dt
-        for i in range(self.n_fields):
-            slopes[i] = offset_interpolate(self.dims, dp,
-                            self.data[i] + offset)
-        for i in range(3):
-            dp[i] += ds[i] * tf.ns
-        cdef np.float64_t temp
-        for i in range(self.n_fields):
-            temp = slopes[i]
-            slopes[i] -= offset_interpolate(self.dims, dp,
-                             self.data[i] + offset)
-            slopes[i] *= -1.0/tf.ns
-            self.dvs[i] = temp
-        if self.star_list != NULL:
-            for i in range(3):
-                cell_left[i] = ci[i] * self.dds[i] + self.left_edge[i]
-                # this gets us dp as the current first sample position
-                pos[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
-                dp[i] -= tf.ns * ds[i]
-                local_dds[i] = v_dir[i] * dt
-            ballq = kdtree_utils.kd_nearest_range3(
-                self.star_list, cell_left[0] + self.dds[0]*0.5,
-                                cell_left[1] + self.dds[1]*0.5,
-                                cell_left[2] + self.dds[2]*0.5,
-                                self.star_er + 0.9*self.dds[0])
-                                            # ~0.866 + a bit
-        for dti in range(tf.ns): 
-            #if (dv < tf.x_bounds[0]) or (dv > tf.x_bounds[1]):
-            #    continue
-            if self.star_list != NULL:
-                self.add_stars(ballq, dt, pos, rgba)
-                for i in range(3):
-                    dp[i] += ds[i]
-                    pos[i] += local_dds[i]
-            tf.eval_transfer(dt, self.dvs, rgba, grad)
-            for i in range(self.n_fields):
-                self.dvs[i] += slopes[i]
-        if ballq != NULL: kdtree_utils.kd_res_free(ballq)
+    cdef int integrate_ray(np.float64_t left_edge[3],
+                           np.float64_t right_edge[3],
+                           np.float64_t dds[3],
+                           np.float64_t idds[3],
+                           int dims[3],
+                           np.float64_t v_pos[3],
+                           np.float64_t v_dir[3],
+                           np.float64_t rgba[4],
+                           TransferFunctionProxy tf,
+                           np.float64_t *return_t = NULL,
+                           np.float64_t enter_t = -1.0):
+        integrate_ray(self.left_edge, self.right_edge,
+                      self.dds, self.idds, self.dims, v_pos,
+                      v_dir, rgba, tf, return_t, enter_t,
+                      self.sample_value)
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -2011,3 +1846,72 @@
 # From Enzo:
 #   dOmega = 4 pi r^2/Nrays
 #   if (dOmega > RaysPerCell * dx^2) then split
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+cdef void pg_sample_values(np.float64_t v_pos[3],
+                        np.float64_t v_dir[3],
+                        np.float64_t enter_t,
+                        np.float64_t exit_t,
+                        int ci[3],
+                        void *rdata):
+    cdef VolumeRendererData *dd = <VolumeRendererData*> rdata
+    cdef PartitionedGrid self = dd.pg
+    cdef np.float64_t cp[3], dp[3], pos[3], dt, t, dv
+    cdef np.float64_t grad[3], ds[3]
+    cdef np.float64_t local_dds[3], cell_left[3]
+    grad[0] = grad[1] = grad[2] = 0.0
+    cdef int dti, i
+    cdef kdtree_utils.kdres *ballq = NULL
+    dt = (exit_t - enter_t) / tf.ns # 4 samples should be dt=0.25
+    cdef int offset = ci[0] * (self.dims[1] + 1) * (self.dims[2] + 1) \
+                    + ci[1] * (self.dims[2] + 1) + ci[2]
+    # The initial and final values can be linearly interpolated between; so
+    # we just have to calculate our initial and final values.
+    cdef np.float64_t slopes[6]
+    for i in range(3):
+        dp[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
+        dp[i] -= ci[i] * self.dds[i] + self.left_edge[i]
+        dp[i] *= self.idds[i]
+        ds[i] = v_dir[i] * self.idds[i] * dt
+    for i in range(self.n_fields):
+        slopes[i] = offset_interpolate(self.dims, dp,
+                        self.data[i] + offset)
+        if tf.grad == i:
+            eval_gradient(self.dims, dp, self.data[i] + offset,
+                          grad)
+    for i in range(3):
+        dp[i] += ds[i] * tf.ns
+    cdef np.float64_t temp
+    for i in range(self.n_fields):
+        temp = slopes[i]
+        slopes[i] -= offset_interpolate(self.dims, dp,
+                         self.data[i] + offset)
+        slopes[i] *= -1.0/tf.ns
+        self.dvs[i] = temp
+    if self.star_list != NULL:
+        for i in range(3):
+            cell_left[i] = ci[i] * self.dds[i] + self.left_edge[i]
+            # this gets us dp as the current first sample position
+            pos[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
+            dp[i] -= tf.ns * ds[i]
+            local_dds[i] = v_dir[i] * dt
+        ballq = kdtree_utils.kd_nearest_range3(
+            self.star_list, cell_left[0] + self.dds[0]*0.5,
+                            cell_left[1] + self.dds[1]*0.5,
+                            cell_left[2] + self.dds[2]*0.5,
+                            self.star_er + 0.9*self.dds[0])
+                                        # ~0.866 + a bit
+    for dti in range(tf.ns): 
+        #if (dv < tf.x_bounds[0]) or (dv > tf.x_bounds[1]):
+        #    continue
+        if self.star_list != NULL:
+            self.add_stars(ballq, dt, pos, rgba)
+            for i in range(3):
+                dp[i] += ds[i]
+                pos[i] += local_dds[i]
+        tf.eval_transfer(dt, self.dvs, rgba, grad)
+        for i in range(self.n_fields):
+            self.dvs[i] += slopes[i]
+    if ballq != NULL: kdtree_utils.kd_res_free(ballq)
+




diff -r 5220b10d9105af3cff9abe85bae5a983f26fc2a7 -r 5303e3e582c693ae04ca3075ca53bbe87e224262 yt/utilities/_amr_utils/fp_utils.pxd
--- /dev/null
+++ b/yt/utilities/_amr_utils/fp_utils.pxd
@@ -0,0 +1,53 @@
+"""
+Shareable definitions for common fp/int Cython utilities
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+cimport numpy as np
+cimport cython
+
+cdef inline int imax(int i0, int i1):
+    if i0 > i1: return i0
+    return i1
+
+cdef inline np.float64_t fmax(np.float64_t f0, np.float64_t f1):
+    if f0 > f1: return f0
+    return f1
+
+cdef inline int imin(int i0, int i1):
+    if i0 < i1: return i0
+    return i1
+
+cdef inline np.float64_t fmin(np.float64_t f0, np.float64_t f1):
+    if f0 < f1: return f0
+    return f1
+
+cdef inline int iclip(int i, int a, int b):
+    if i < a: return a
+    if i > b: return b
+    return i
+
+cdef inline np.float64_t fclip(np.float64_t f,
+                      np.float64_t a, np.float64_t b):
+    return fmin(fmax(f, a), b)
+


diff -r 5220b10d9105af3cff9abe85bae5a983f26fc2a7 -r 5303e3e582c693ae04ca3075ca53bbe87e224262 yt/utilities/_amr_utils/geometry_utils.pyx
--- /dev/null
+++ b/yt/utilities/_amr_utils/geometry_utils.pyx
@@ -0,0 +1,244 @@
+"""
+Simple integrators for the radiative transfer equation
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt.enzotools.org/
+License:
+  Copyright (C) 2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+cimport numpy as np
+cimport cython
+from stdlib cimport malloc, free, abs
+
+# These routines are separated into a couple different categories:
+#
+#   * Routines for identifying intersections of an object with a bounding box
+#   * Routines for identifying cells/points inside a bounding box that
+#     intersect with an object
+#   * Routines that speed up some type of geometric calculation
+
+# First, bounding box / object intersection routines.
+# These all respect the interface "dobj" and a set of left_edges, right_edges,
+# sometimes also accepting level and mask information.
+
+def ortho_ray_grids(dobj, np.ndarray[np.float64_t, ndim=2] left_edges,
+                          np.ndarray[np.float64_t, ndim=2] right_edges):
+    cdef int i
+    cdef int ng = left_edges.shape[0]
+    cdef int px_ax = dobj.px_ax
+    cdef int py_ax = dobj.py_ax
+    cdef np.float64_t px = dobj.px
+    cdef np.float64_t py = dobj.py
+    cdef np.ndarray[np.int32_t, ndim=1] gridi = np.zeros(ng, dtype='int32_t')
+    for i in range(ng):
+        if (    (px >= left_edges[i, px])
+            and (px < right_edges[i, px])
+            and (py >= left_edges[i, py])
+            and (py < right_edges[i, py])):
+            gridi[i] = 1
+    return gridi
+
+def ray_grids(dobj, np.ndarray[np.float64_t, ndim=2] left_edges,
+                    np.ndarray[np.float64_t, ndim=2] right_edges):
+    cdef int i, ax
+    cdef int i1, i2
+    cdef int ng = left_edges.shape[0]
+    cdef np.ndarray[np.int32_t, ndim=1] gridi = np.zeros(ng, dtype='int32')
+    cdef np.float64_t vs[3], t, p0[3], p1[3], v[3]
+    for i in range(3):
+        p0[i] = dobj.start_point[i]
+        p1[i] = dobj.end_point[i]
+        v[i] = dobj.vec[i]
+    # We check first to see if at any point, the ray intersects a grid face
+    for gi in range(ng):
+        for ax in range(3):
+            i1 = (ax+1) % 3
+            i2 = (ax+2) % 3
+            t = (left_edges[gi,ax] - p0[ax])/v[ax]
+            for i in range(3):
+                vs[i] = t * v[i] + p0[i]
+            if left_edges[gi,i1] <= vs[i1] and \
+               right_edges[gi,i1] >= vs[i1] and \
+               left_edges[gi,i2] <= vs[i2] and \
+               right_edges[gi,i2] >= vs[i2]:
+                gridi[gi] = 1
+                break
+            t = (right_edges[gi,ax] - p0[ax])/v[ax]
+            for i in range(3):
+                vs[i] = t * v[i] + p0[i]
+            if left_edges[gi,i1] <= vs[i1] and \
+               right_edges[gi,i1] >= vs[i1] and \
+               left_edges[gi,i2] <= vs[i2] and \
+               right_edges[gi,i2] >= vs[i2]:
+                gridi[gi] = 1
+                break
+        if gridi[gi] == 1: continue
+        # if the point is fully enclosed, we count the grid
+        if left_edges[gi,0] <= p0[0] and \
+           right_edges[gi,0] >= p0[0] and \
+           left_edges[gi,1] <= p0[1] and \
+           right_edges[gi,1] >= p0[1] and \
+           left_edges[gi,2] <= p0[2] and \
+           right_edges[gi,2] >= p0[2]:
+            gridi[gi] = 1
+            continue
+        if left_edges[gi,0] <= p1[0] and \
+           right_edges[gi,0] >= p1[0] and \
+           left_edges[gi,1] <= p1[1] and \
+           right_edges[gi,1] >= p1[1] and \
+           left_edges[gi,2] <= p1[2] and \
+           right_edges[gi,2] >= p1[2]:
+            gridi[gi] = 1
+            continue
+    return gridi
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def get_box_grids_level(np.ndarray[np.float64_t, ndim=1] left_edge,
+                        np.ndarray[np.float64_t, ndim=1] right_edge,
+                        int level,
+                        np.ndarray[np.float64_t, ndim=2] left_edges,
+                        np.ndarray[np.float64_t, ndim=2] right_edges,
+                        np.ndarray[np.int32_t, ndim=2] levels,
+                        np.ndarray[np.int32_t, ndim=1] mask,
+                        int min_index = 0):
+    cdef int i, n
+    cdef int nx = left_edges.shape[0]
+    cdef int inside 
+    for i in range(nx):
+        if i < min_index or levels[i,0] != level:
+            mask[i] = 0
+            continue
+        inside = 1
+        for n in range(3):
+            if left_edge[n] >= right_edges[i,n] or \
+               right_edge[n] <= left_edges[i,n]:
+                inside = 0
+                break
+        if inside == 1: mask[i] = 1
+        else: mask[i] = 0
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def get_box_grids_below_level(
+                        np.ndarray[np.float64_t, ndim=1] left_edge,
+                        np.ndarray[np.float64_t, ndim=1] right_edge,
+                        int level,
+                        np.ndarray[np.float64_t, ndim=2] left_edges,
+                        np.ndarray[np.float64_t, ndim=2] right_edges,
+                        np.ndarray[np.int32_t, ndim=2] levels,
+                        np.ndarray[np.int32_t, ndim=1] mask):
+    cdef int i, n
+    cdef int nx = left_edges.shape[0]
+    cdef int inside 
+    for i in range(nx):
+        mask[i] = 0
+        if levels[i,0] <= level:
+            inside = 1
+            for n in range(3):
+                if left_edge[n] >= right_edges[i,n] or \
+                   right_edge[n] <= left_edges[i,n]:
+                    inside = 0
+                    break
+            if inside == 1: mask[i] = 1
+
+# Finally, miscellaneous routines.
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def find_values_at_point(np.ndarray[np.float64_t, ndim=1] point,
+                         np.ndarray[np.float64_t, ndim=2] left_edges,
+                         np.ndarray[np.float64_t, ndim=2] right_edges,
+                         np.ndarray[np.int32_t, ndim=2] dimensions,
+                         field_names, grid_objects):
+    # This iterates in order, first to last, and then returns with the first
+    # one in which the point is located; this means if you order from highest
+    # level to lowest, you will find the correct grid without consulting child
+    # masking.  Note also that we will do a few relatively slow operations on
+    # strings and whatnot, but they should not be terribly slow.
+    cdef int ind[3], gi, fi
+    cdef int nf = len(field_names)
+    cdef np.float64_t dds
+    cdef np.ndarray[np.float64_t, ndim=3] field
+    cdef np.ndarray[np.float64_t, ndim=1] rv = np.zeros(nf, dtype='float64')
+    for gi in range(left_edges.shape[0]):
+        if not ((left_edges[gi,0] < point[0] < right_edges[gi,0])
+            and (left_edges[gi,1] < point[1] < right_edges[gi,1])
+            and (left_edges[gi,2] < point[2] < right_edges[gi,2])):
+            continue
+        # We found our grid!
+        for fi in range(3):
+            dds = ((right_edges[gi,fi] - left_edges[gi,fi])/
+                   (<np.float64_t> dimensions[gi,fi]))
+            ind[fi] = <int> ((point[fi] - left_edges[gi,fi])/dds)
+        grid = grid_objects[gi]
+        for fi in range(nf):
+            field = grid[field_names[fi]]
+            rv[fi] = field[ind[0], ind[1], ind[2]]
+        return rv
+    raise KeyError
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def obtain_rvec(data):
+    # This is just to let the pointers exist and whatnot.  We can't cdef them
+    # inside conditionals.
+    cdef np.ndarray[np.float64_t, ndim=1] xf
+    cdef np.ndarray[np.float64_t, ndim=1] yf
+    cdef np.ndarray[np.float64_t, ndim=1] zf
+    cdef np.ndarray[np.float64_t, ndim=2] rf
+    cdef np.ndarray[np.float64_t, ndim=3] xg
+    cdef np.ndarray[np.float64_t, ndim=3] yg
+    cdef np.ndarray[np.float64_t, ndim=3] zg
+    cdef np.ndarray[np.float64_t, ndim=4] rg
+    cdef np.float64_t c[3]
+    cdef int i, j, k
+    center = data.get_field_parameter("center")
+    c[0] = center[0]; c[1] = center[1]; c[2] = center[2]
+    if len(data['x'].shape) == 1:
+        # One dimensional data
+        xf = data['x']
+        yf = data['y']
+        zf = data['z']
+        rf = np.empty((3, xf.shape[0]), 'float64')
+        for i in range(xf.shape[0]):
+            rf[0, i] = xf[i] - c[0]
+            rf[1, i] = yf[i] - c[1]
+            rf[2, i] = zf[i] - c[2]
+        return rf
+    else:
+        # Three dimensional data
+        xg = data['x']
+        yg = data['y']
+        zg = data['z']
+        rg = np.empty((3, xg.shape[0], xg.shape[1], xg.shape[2]), 'float64')
+        for i in range(xg.shape[0]):
+            for j in range(xg.shape[1]):
+                for k in range(xg.shape[2]):
+                    rg[0,i,j,k] = xg[i,j,k] - c[0]
+                    rg[1,i,j,k] = yg[i,j,k] - c[1]
+                    rg[2,i,j,k] = zg[i,j,k] - c[2]
+        return rg
+
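For orientation, the cell lookup in find_values_at_point above boils down to a single per-axis index computation from the grid's left edge and cell width. A minimal NumPy sketch of just that step (hypothetical helper name, not part of the changeset):

    import numpy as np

    def cell_index(point, left_edge, right_edge, dimensions):
        # Cell width along each axis, then the zero-based index of the
        # cell containing `point`; mirrors the dds/ind computation above.
        dds = (right_edge - left_edge) / dimensions
        return np.minimum(((point - left_edge) / dds).astype(int),
                          dimensions - 1)

    # Example: a 16^3 grid spanning the unit cube.
    dims = np.array([16, 16, 16])
    print(cell_index(np.array([0.5, 0.25, 0.99]),
                     np.zeros(3), np.ones(3), dims))   # -> [ 8  4 15]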


diff -r 5220b10d9105af3cff9abe85bae5a983f26fc2a7 -r 5303e3e582c693ae04ca3075ca53bbe87e224262 yt/utilities/_amr_utils/misc_utilities.pyx
--- a/yt/utilities/_amr_utils/misc_utilities.pyx
+++ b/yt/utilities/_amr_utils/misc_utilities.pyx
@@ -61,6 +61,65 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
+def kdtree_get_choices(np.ndarray[np.float64_t, ndim=3] data,
+                       np.ndarray[np.float64_t, ndim=1] l_corner,
+                       np.ndarray[np.float64_t, ndim=1] r_corner):
+    cdef int i, j, k, dim, n_unique, best_dim, n_best, n_grids, addit, my_split
+    n_grids = data.shape[0]
+    cdef np.float64_t **uniquedims, *uniques, split
+    uniquedims = <np.float64_t **> alloca(3 * sizeof(np.float64_t*))
+    for i in range(3):
+        uniquedims[i] = <np.float64_t *> \
+                alloca(2*n_grids * sizeof(np.float64_t))
+    my_max = 0
+    for dim in range(3):
+        n_unique = 0
+        uniques = uniquedims[dim]
+        for i in range(n_grids):
+            # Check for disqualification
+            for j in range(2):
+                #print "Checking against", i,j,dim,data[i,j,dim]
+                if not (l_corner[dim] < data[i, j, dim] and
+                        data[i, j, dim] < r_corner[dim]):
+                    #print "Skipping ", data[i,j,dim]
+                    continue
+                skipit = 0
+                # Add our left ...
+                for k in range(n_unique):
+                    if uniques[k] == data[i, j, dim]:
+                        skipit = 1
+                        #print "Identified", uniques[k], data[i,j,dim], n_unique
+                        break
+                if skipit == 0:
+                    uniques[n_unique] = data[i, j, dim]
+                    n_unique += 1
+        if n_unique > my_max:
+            best_dim = dim
+            my_max = n_unique
+            my_split = (n_unique-1)/2
+    # I recognize how lame this is.
+    cdef np.ndarray[np.float64_t, ndim=1] tarr = np.empty(my_max, dtype='float64')
+    for i in range(my_max):
+        #print "Setting tarr: ", i, uniquedims[best_dim][i]
+        tarr[i] = uniquedims[best_dim][i]
+    tarr.sort()
+    split = tarr[my_split]
+    cdef np.ndarray[np.uint8_t, ndim=1] less_ids = np.empty(n_grids, dtype='uint8')
+    cdef np.ndarray[np.uint8_t, ndim=1] greater_ids = np.empty(n_grids, dtype='uint8')
+    for i in range(n_grids):
+        if data[i, 0, best_dim] < split:
+            less_ids[i] = 1
+        else:
+            less_ids[i] = 0
+        if data[i, 1, best_dim] > split:
+            greater_ids[i] = 1
+        else:
+            greater_ids[i] = 0
+    # Return out unique values
+    return best_dim, split, less_ids.view("bool"), greater_ids.view("bool")
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
 def get_box_grids_level(np.ndarray[np.float64_t, ndim=1] left_edge,
                         np.ndarray[np.float64_t, ndim=1] right_edge,
                         int level,
@@ -188,62 +247,3 @@
                     rg[2,i,j,k] = zg[i,j,k] - c[2]
         return rg
 
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-def kdtree_get_choices(np.ndarray[np.float64_t, ndim=3] data,
-                       np.ndarray[np.float64_t, ndim=1] l_corner,
-                       np.ndarray[np.float64_t, ndim=1] r_corner):
-    cdef int i, j, k, dim, n_unique, best_dim, n_best, n_grids, addit, my_split
-    n_grids = data.shape[0]
-    cdef np.float64_t **uniquedims, *uniques, split
-    uniquedims = <np.float64_t **> alloca(3 * sizeof(np.float64_t*))
-    for i in range(3):
-        uniquedims[i] = <np.float64_t *> \
-                alloca(2*n_grids * sizeof(np.float64_t))
-    my_max = 0
-    for dim in range(3):
-        n_unique = 0
-        uniques = uniquedims[dim]
-        for i in range(n_grids):
-            # Check for disqualification
-            for j in range(2):
-                #print "Checking against", i,j,dim,data[i,j,dim]
-                if not (l_corner[dim] < data[i, j, dim] and
-                        data[i, j, dim] < r_corner[dim]):
-                    #print "Skipping ", data[i,j,dim]
-                    continue
-                skipit = 0
-                # Add our left ...
-                for k in range(n_unique):
-                    if uniques[k] == data[i, j, dim]:
-                        skipit = 1
-                        #print "Identified", uniques[k], data[i,j,dim], n_unique
-                        break
-                if skipit == 0:
-                    uniques[n_unique] = data[i, j, dim]
-                    n_unique += 1
-        if n_unique > my_max:
-            best_dim = dim
-            my_max = n_unique
-            my_split = (n_unique-1)/2
-    # I recognize how lame this is.
-    cdef np.ndarray[np.float64_t, ndim=1] tarr = np.empty(my_max, dtype='float64')
-    for i in range(my_max):
-        #print "Setting tarr: ", i, uniquedims[best_dim][i]
-        tarr[i] = uniquedims[best_dim][i]
-    tarr.sort()
-    split = tarr[my_split]
-    cdef np.ndarray[np.uint8_t, ndim=1] less_ids = np.empty(n_grids, dtype='uint8')
-    cdef np.ndarray[np.uint8_t, ndim=1] greater_ids = np.empty(n_grids, dtype='uint8')
-    for i in range(n_grids):
-        if data[i, 0, best_dim] < split:
-            less_ids[i] = 1
-        else:
-            less_ids[i] = 0
-        if data[i, 1, best_dim] > split:
-            greater_ids[i] = 1
-        else:
-            greater_ids[i] = 0
-    # Return out unique values
-    return best_dim, split, less_ids.view("bool"), greater_ids.view("bool")
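The kdtree_get_choices routine relocated in this hunk picks a split plane by gathering, for each axis, the grid edge coordinates that fall strictly inside the node, taking the axis with the most unique candidates, and splitting at the median candidate. A rough NumPy equivalent, assuming at least one candidate plane exists (illustrative only, not the patch's code):

    import numpy as np

    def kdtree_split_sketch(edges, l_corner, r_corner):
        # edges has shape (n_grids, 2, 3): left/right edge per grid per axis.
        best_dim, best_uniques = 0, np.empty(0)
        for dim in range(3):
            vals = edges[:, :, dim].ravel()
            inside = vals[(vals > l_corner[dim]) & (vals < r_corner[dim])]
            uniques = np.unique(inside)              # sorted unique candidates
            if uniques.size > best_uniques.size:
                best_dim, best_uniques = dim, uniques
        split = best_uniques[(best_uniques.size - 1) // 2]    # median plane
        less = edges[:, 0, best_dim] < split
        greater = edges[:, 1, best_dim] > split
        return best_dim, split, less, greater

As in the Cython version, `less` and `greater` flag the grids whose extent continues on either side of the chosen plane.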


diff -r 5220b10d9105af3cff9abe85bae5a983f26fc2a7 -r 5303e3e582c693ae04ca3075ca53bbe87e224262 yt/utilities/_amr_utils/png_writer.pyx
--- a/yt/utilities/_amr_utils/png_writer.pyx
+++ b/yt/utilities/_amr_utils/png_writer.pyx
@@ -26,9 +26,10 @@
 import numpy as np
 cimport numpy as np
 cimport cython
-from libc.stdlib cimport malloc, realloc
+from libc.stdlib cimport malloc, realloc, free
 from libc.string cimport memcpy
 from cpython.string cimport PyString_FromStringAndSize
+from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip
 
 from stdio cimport fopen, fclose, FILE
 


diff -r 5220b10d9105af3cff9abe85bae5a983f26fc2a7 -r 5303e3e582c693ae04ca3075ca53bbe87e224262 yt/utilities/_amr_utils/ray_handling.pxd
--- /dev/null
+++ b/yt/utilities/_amr_utils/ray_handling.pxd
@@ -0,0 +1,153 @@
+"""
+General purpose ray casting
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2009 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+
+cimport numpy as np
+from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip
+
+ctypedef void (*ray_sampler) (np.float64_t v_pos[3],
+                              np.float64_t v_dir[3],
+                              np.float64_t enter_t,
+                              np.float64_t exit_t,
+                              int ci[3],
+                              void *rdata)
+
+@cython.cdivision(True)
+@cython.boundscheck(False)
+@cython.wraparound(False)
+cdef int integrate_ray(np.float64_t left_edge[3],
+                       np.float64_t right_edge[3],
+                       np.float64_t dds[3],
+                       np.float64_t idds[3],
+                       int dims[3],
+                       np.float64_t v_pos[3],
+                       np.float64_t v_dir[3],
+                       np.float64_t *return_t,
+                       np.float64_t enter_t,
+                       void *rdata):
+    cdef int cur_ind[3], step[3], x, y, i, n, flat_ind, hit, direction
+    cdef np.float64_t intersect_t = 1.0
+    cdef np.float64_t iv_dir[3]
+    cdef np.float64_t intersect[3], tmax[3], tdelta[3]
+    cdef np.float64_t dist, alpha, dt, exit_t
+    cdef np.float64_t tr, tl, temp_x, temp_y, dv
+    for i in range(3):
+        if (v_dir[i] < 0):
+            step[i] = -1
+        elif (v_dir[i] == 0):
+            step[i] = 1
+            tmax[i] = 1e60
+            iv_dir[i] = 1e60
+            tdelta[i] = 1e-60
+            continue
+        else:
+            step[i] = 1
+        x = (i+1) % 3
+        y = (i+2) % 3
+        iv_dir[i] = 1.0/v_dir[i]
+        tl = (left_edge[i] - v_pos[i])*iv_dir[i]
+        temp_x = (v_pos[x] + tl*v_dir[x])
+        temp_y = (v_pos[y] + tl*v_dir[y])
+        if left_edge[x] <= temp_x and temp_x <= right_edge[x] and \
+           left_edge[y] <= temp_y and temp_y <= right_edge[y] and \
+           0.0 <= tl and tl < intersect_t:
+            direction = i
+            intersect_t = tl
+        tr = (right_edge[i] - v_pos[i])*iv_dir[i]
+        temp_x = (v_pos[x] + tr*v_dir[x])
+        temp_y = (v_pos[y] + tr*v_dir[y])
+        if left_edge[x] <= temp_x and temp_x <= right_edge[x] and \
+           left_edge[y] <= temp_y and temp_y <= right_edge[y] and \
+           0.0 <= tr and tr < intersect_t:
+            direction = i
+            intersect_t = tr
+    if left_edge[0] <= v_pos[0] and v_pos[0] <= right_edge[0] and \
+       left_edge[1] <= v_pos[1] and v_pos[1] <= right_edge[1] and \
+       left_edge[2] <= v_pos[2] and v_pos[2] <= right_edge[2]:
+        intersect_t = 0.0
+    if enter_t >= 0.0: intersect_t = enter_t
+    if not ((0.0 <= intersect_t) and (intersect_t < 1.0)): return 0
+    for i in range(3):
+        intersect[i] = v_pos[i] + intersect_t * v_dir[i]
+        cur_ind[i] = <int> floor((intersect[i] +
+                                  step[i]*1e-8*dds[i] -
+                                  left_edge[i])*idds[i])
+        tmax[i] = (((cur_ind[i]+step[i])*dds[i])+
+                    left_edge[i]-v_pos[i])*iv_dir[i]
+        # This deals with the asymmetry in having our indices refer to the
+        # left edge of a cell, but the right edge of the brick being one
+        # extra zone out.
+        if cur_ind[i] == dims[i] and step[i] < 0:
+            cur_ind[i] = dims[i] - 1
+        if cur_ind[i] < 0 or cur_ind[i] >= dims[i]: return 0
+        if step[i] > 0:
+            tmax[i] = (((cur_ind[i]+1)*dds[i])
+                        +left_edge[i]-v_pos[i])*iv_dir[i]
+        if step[i] < 0:
+            tmax[i] = (((cur_ind[i]+0)*dds[i])
+                        +left_edge[i]-v_pos[i])*iv_dir[i]
+        tdelta[i] = (dds[i]*iv_dir[i])
+        if tdelta[i] < 0: tdelta[i] *= -1
+    # We have to jumpstart our calculation
+    enter_t = intersect_t
+    hit = 0
+    while 1:
+        # dims here is one less than the dimensions of the data,
+        # but we are tracing on the grid, not on the data...
+        if (not (0 <= cur_ind[0] < dims[0])) or \
+           (not (0 <= cur_ind[1] < dims[1])) or \
+           (not (0 <= cur_ind[2] < dims[2])):
+            break
+        hit += 1
+        if tmax[0] < tmax[1]:
+            if tmax[0] < tmax[2]:
+                exit_t = fmin(tmax[0], 1.0)
+                sample_values(v_pos, v_dir, enter_t, exit_t, cur_ind, rdata)
+                cur_ind[0] += step[0]
+                enter_t = tmax[0]
+                tmax[0] += tdelta[0]
+            else:
+                exit_t = fmin(tmax[2], 1.0)
+                sample_values(v_pos, v_dir, enter_t, exit_t, cur_ind, rdata)
+                cur_ind[2] += step[2]
+                enter_t = tmax[2]
+                tmax[2] += tdelta[2]
+        else:
+            if tmax[1] < tmax[2]:
+                exit_t = fmin(tmax[1], 1.0)
+                sample_values(v_pos, v_dir, enter_t, exit_t, cur_ind, rdata)
+                cur_ind[1] += step[1]
+                enter_t = tmax[1]
+                tmax[1] += tdelta[1]
+            else:
+                exit_t = fmin(tmax[2], 1.0)
+                sample_values(v_pos, v_dir, enter_t, exit_t, cur_ind, rdata)
+                cur_ind[2] += step[2]
+                enter_t = tmax[2]
+                tmax[2] += tdelta[2]
+        if enter_t >= 1.0: break
+    if return_t != NULL: return_t[0] = exit_t
+    return hit
+
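integrate_ray above is essentially the Amanatides & Woo voxel traversal: after clipping the ray against the brick, it repeatedly steps across whichever cell face is reached first and hands each (enter_t, exit_t) interval to a sampler callback. A simplified Python sketch of the stepping loop, assuming the ray origin already lies inside the brick and v_dir has no zero components (hypothetical names, not part of the patch):

    import numpy as np

    def trace_brick(left_edge, dds, dims, v_pos, v_dir, sampler):
        # Assumes v_pos is inside the brick and v_dir has no zero
        # components; the real integrate_ray also clips against the brick
        # faces and special-cases zero direction components.
        iv_dir = 1.0 / v_dir
        step = np.where(v_dir < 0, -1, 1)
        cur_ind = ((v_pos - left_edge) / dds).astype(int)
        # Parametric distance to the next face on each axis, and the
        # spacing between successive faces.
        tmax = (left_edge + (cur_ind + (step > 0)) * dds - v_pos) * iv_dir
        tdelta = np.abs(dds * iv_dir)
        enter_t, hits = 0.0, 0
        while np.all((0 <= cur_ind) & (cur_ind < dims)):
            axis = int(np.argmin(tmax))
            exit_t = min(tmax[axis], 1.0)
            sampler(v_pos, v_dir, enter_t, exit_t, tuple(cur_ind))
            hits += 1
            cur_ind[axis] += step[axis]
            enter_t = tmax[axis]
            tmax[axis] += tdelta[axis]
            if enter_t >= 1.0:
                break
        return hits

    # Example: record the cells one ray crosses in a 4^3 unit brick.
    segments = []
    trace_brick(np.zeros(3), np.full(3, 0.25), np.array([4, 4, 4]),
                np.array([0.01, 0.02, 0.03]), np.array([0.9, 0.5, 0.2]),
                lambda p, d, t0, t1, ci: segments.append((ci, t1 - t0)))
    print(segments)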


diff -r 5220b10d9105af3cff9abe85bae5a983f26fc2a7 -r 5303e3e582c693ae04ca3075ca53bbe87e224262 yt/utilities/_amr_utils/setup.py
--- /dev/null
+++ b/yt/utilities/_amr_utils/setup.py
@@ -0,0 +1,177 @@
+#!/usr/bin/env python
+import setuptools
+import os, sys, os.path, glob
+
+def check_for_png():
+    # First up: HDF5_DIR in environment
+    if "PNG_DIR" in os.environ:
+        png_dir = os.environ["PNG_DIR"]
+        png_inc = os.path.join(png_dir, "include")
+        png_lib = os.path.join(png_dir, "lib")
+        print "PNG_LOCATION: PNG_DIR: %s, %s" % (png_inc, png_lib)
+        return (png_inc, png_lib)
+    # Next up, we try png.cfg
+    elif os.path.exists("png.cfg"):
+        png_dir = open("png.cfg").read().strip()
+        png_inc = os.path.join(png_dir, "include")
+        png_lib = os.path.join(png_dir, "lib")
+        print "PNG_LOCATION: png.cfg: %s, %s" % (png_inc, png_lib)
+        return (png_inc, png_lib)
+    # Now we see if ctypes can help us:
+    try:
+        import ctypes.util
+        png_libfile = ctypes.util.find_library("png")
+        if png_libfile is not None and os.path.isfile(png_libfile):
+            # Now we've gotten a library, but we'll need to figure out the
+            # includes if this is going to work.  It feels like there is a
+            # better way to pull off two directory names.
+            png_dir = os.path.dirname(os.path.dirname(png_libfile))
+            if os.path.isdir(os.path.join(png_dir, "include")) and \
+               os.path.isfile(os.path.join(png_dir, "include", "png.h")):
+                png_inc = os.path.join(png_dir, "include")
+                png_lib = os.path.join(png_dir, "lib")
+                print "PNG_LOCATION: png found in: %s, %s" % (png_inc, png_lib)
+                return png_inc, png_lib
+    except ImportError:
+        pass
+    # X11 is where it's located by default on OSX, although I am slightly
+    # reluctant to link against that one.
+    for png_dir in ["/usr/", "/usr/local/", "/usr/X11/"]:
+        if os.path.isfile(os.path.join(png_dir, "include", "png.h")):
+            if os.path.isdir(os.path.join(png_dir, "include")) and \
+               os.path.isfile(os.path.join(png_dir, "include", "png.h")):
+                png_inc = os.path.join(png_dir, "include")
+                png_lib = os.path.join(png_dir, "lib")
+                print "PNG_LOCATION: png found in: %s, %s" % (png_inc, png_lib)
+                return png_inc, png_lib
+    print "Reading png location from png.cfg failed."
+    print "Please place the base directory of your png install in png.cfg and restart."
+    print "(ex: \"echo '/usr/local/' > png.cfg\" )"
+    sys.exit(1)
+
+def check_for_freetype():
+    # First up: environment
+    if "FTYPE_DIR" in os.environ:
+        freetype_dir = os.environ["FTYPE_DIR"]
+        freetype_inc = os.path.join(freetype_dir, "include")
+        freetype_lib = os.path.join(freetype_dir, "lib")
+        print "FTYPE_LOCATION: FTYPE_DIR: %s, %s" % (freetype_inc, freetype_lib)
+        return (freetype_inc, freetype_lib)
+    # Next up, we try freetype.cfg
+    elif os.path.exists("freetype.cfg"):
+        freetype_dir = open("freetype.cfg").read().strip()
+        freetype_inc = os.path.join(freetype_dir, "include")
+        freetype_lib = os.path.join(freetype_dir, "lib")
+        print "FTYPE_LOCATION: freetype.cfg: %s, %s" % (freetype_inc, freetype_lib)
+        return (freetype_inc, freetype_lib)
+    # Now we see if ctypes can help us:
+    try:
+        import ctypes.util
+        freetype_libfile = ctypes.util.find_library("freetype")
+        if freetype_libfile is not None and os.path.isfile(freetype_libfile):
+            # Now we've gotten a library, but we'll need to figure out the
+            # includes if this is going to work.  It feels like there is a
+            # better way to pull off two directory names.
+            freetype_dir = os.path.dirname(os.path.dirname(freetype_libfile))
+            if os.path.isdir(os.path.join(freetype_dir, "include")) and \
+               os.path.isfile(os.path.join(freetype_dir, "include", "ft2build.h")):
+                freetype_inc = os.path.join(freetype_dir, "include")
+                freetype_lib = os.path.join(freetype_dir, "lib")
+                print "FTYPE_LOCATION: freetype found in: %s, %s" % (freetype_inc, freetype_lib)
+                return freetype_inc, freetype_lib
+    except ImportError:
+        pass
+    # X11 is where it's located by default on OSX, although I am slightly
+    # reluctant to link against that one.
+    for freetype_dir in ["/usr/", "/usr/local/", "/usr/X11/"]:
+        if os.path.isfile(os.path.join(freetype_dir, "include", "ft2build.h")):
+            if os.path.isdir(os.path.join(freetype_dir, "include")) and \
+               os.path.isfile(os.path.join(freetype_dir, "include", "ft2build.h")):
+                freetype_inc = os.path.join(freetype_dir, "include")
+                freetype_lib = os.path.join(freetype_dir, "lib")
+                print "FTYPE_LOCATION: freetype found in: %s, %s" % (freetype_inc, freetype_lib)
+                return freetype_inc, freetype_lib
+    print "Reading freetype location from freetype.cfg failed."
+    print "Please place the base directory of your freetype install in freetype.cfg and restart."
+    print "(ex: \"echo '/usr/local/' > freetype.cfg\" )"
+    print "You can locate this by looking for the file ft2build.h"
+    sys.exit(1)
+
+def configuration(parent_package='',top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('lib',parent_package,top_path)
+    png_inc, png_lib = check_for_png()
+    freetype_inc, freetype_lib = check_for_freetype()
+    # Because setjmp.h is included by lots of things, and because libpng hasn't
+    # always properly checked its header files (see
+    # https://bugzilla.redhat.com/show_bug.cgi?id=494579 ) we simply disable
+    # support for setjmp.
+    config.add_extension("CICDeposit", 
+                ["yt/utilities/_amr_utils/CICDeposit.pyx"],
+                libraries=["m"], depends=["yt/utilities/_amr_utils/fp_utils.pxd"])
+    config.add_extension("ContourFinding", 
+                ["yt/utilities/_amr_utils/ContourFinding.pyx",
+                 "yt/utilities/_amr_utils/union_find.c"],
+                libraries=["m"], depends=["yt/utilities/_amr_utils/fp_utils.pxd"])
+    config.add_extension("DepthFirstOctree", 
+                ["yt/utilities/_amr_utils/DepthFirstOctree.pyx"],
+                libraries=["m"], depends=["yt/utilities/_amr_utils/fp_utils.pxd"])
+    config.add_extension("fortran_reader", 
+                ["yt/utilities/_amr_utils/fortran_reader.pyx"],
+                libraries=["m"], depends=["yt/utilities/_amr_utils/fp_utils.pxd"])
+    config.add_extension("freetype_writer", 
+                ["yt/utilities/_amr_utils/freetype_writer.pyx"],
+                include_dirs = [os.path.join(freetype_inc, "freetype2")],
+                library_dirs = [freetype_lib], libraries=["freetype"],
+                depends=["yt/utilities/_amr_utils/freetype_includes.h"])
+    config.add_extension("geometry_utils", 
+                ["yt/utilities/_amr_utils/geometry_utils.pyx"],
+                libraries=["m"], depends=["yt/utilities/_amr_utils/fp_utils.pxd"])
+    config.add_extension("Interpolators", 
+                ["yt/utilities/_amr_utils/Interpolators.pyx"],
+                libraries=["m"], depends=["yt/utilities/_amr_utils/fp_utils.pxd"])
+    config.add_extension("misc_utilities", 
+                ["yt/utilities/_amr_utils/misc_utilities.pyx"],
+                libraries=["m"], depends=["yt/utilities/_amr_utils/fp_utils.pxd"])
+    config.add_extension("Octree", 
+                ["yt/utilities/_amr_utils/Octree.pyx"],
+                libraries=["m"], depends=["yt/utilities/_amr_utils/fp_utils.pxd"])
+    config.add_extension("png_writer", 
+                ["yt/utilities/_amr_utils/png_writer.pyx"],
+                define_macros=[("PNG_SETJMP_NOT_SUPPORTED", True)],
+                include_dirs=[png_inc],
+                library_dirs=[png_lib],
+                libraries=["m", "png"],
+                depends=["yt/utilities/_amr_utils/fp_utils.pxd"]),
+    config.add_extension("PointsInVolume", 
+                ["yt/utilities/_amr_utils/PointsInVolume.pyx"],
+                libraries=["m"], depends=["yt/utilities/_amr_utils/fp_utils.pxd"])
+    config.add_extension("QuadTree", 
+                ["yt/utilities/_amr_utils/QuadTree.pyx"],
+                libraries=["m"], depends=["yt/utilities/_amr_utils/fp_utils.pxd"])
+    config.add_extension("RayIntegrators", 
+                ["yt/utilities/_amr_utils/RayIntegrators.pyx"],
+                libraries=["m"], depends=["yt/utilities/_amr_utils/fp_utils.pxd"])
+    config.add_extension("VolumeIntegrator", 
+               ["yt/utilities/_amr_utils/VolumeIntegrator.pyx",
+                "yt/utilities/_amr_utils/FixedInterpolator.c",
+                "yt/utilities/_amr_utils/kdtree.c"] +
+                 glob.glob("yt/utilities/_amr_utils/healpix_*.c"), 
+               include_dirs=["yt/utilities/_amr_utils/"],
+               libraries=["m"], 
+               depends = ["yt/utilities/_amr_utils/VolumeIntegrator.pyx",
+                          "yt/utilities/_amr_utils/fp_utils.pxd",
+                          "yt/utilities/_amr_utils/healpix_interface.pxd",
+                          "yt/utilities/_amr_utils/endian_swap.h",
+                          "yt/utilities/_amr_utils/FixedInterpolator.h",
+                          "yt/utilities/_amr_utils/healpix_vectors.h",
+                          "yt/utilities/_amr_utils/kdtree.h",
+                          "yt/utilities/_amr_utils/healpix_ang2pix_nest.c",
+                          "yt/utilities/_amr_utils/healpix_mk_pix2xy.c",
+                          "yt/utilities/_amr_utils/healpix_mk_xy2pix.c",
+                          "yt/utilities/_amr_utils/healpix_pix2ang_nest.c",
+                          "yt/utilities/_amr_utils/healpix_pix2vec_nest.c",
+                          "yt/utilities/_amr_utils/healpix_vec2pix_nest.c"]
+          )
+    config.make_config_py() # installs __config__.py
+    return config


diff -r 5220b10d9105af3cff9abe85bae5a983f26fc2a7 -r 5303e3e582c693ae04ca3075ca53bbe87e224262 yt/utilities/amr_utils.py
--- /dev/null
+++ b/yt/utilities/amr_utils.py
@@ -0,0 +1,39 @@
+"""
+Compatibility module
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from ._amr_utils.CICDeposit import *
+from ._amr_utils.ContourFinding import *
+from ._amr_utils.DepthFirstOctree import *
+from ._amr_utils.fortran_reader import *
+from ._amr_utils.freetype_writer import *
+from ._amr_utils.geometry_utils import *
+from ._amr_utils.Interpolators import *
+from ._amr_utils.misc_utilities import *
+from ._amr_utils.Octree import *
+from ._amr_utils.png_writer import *
+from ._amr_utils.PointsInVolume import *
+from ._amr_utils.QuadTree import *
+from ._amr_utils.RayIntegrators import *
+from ._amr_utils.VolumeIntegrator import *


diff -r 5220b10d9105af3cff9abe85bae5a983f26fc2a7 -r 5303e3e582c693ae04ca3075ca53bbe87e224262 yt/utilities/amr_utils.pyx
--- a/yt/utilities/amr_utils.pyx
+++ b/yt/utilities/amr_utils.pyx
@@ -48,3 +48,4 @@
 include "_amr_utils/Octree.pyx"
 include "_amr_utils/freetype_writer.pyx"
 include "_amr_utils/misc_utilities.pyx"
+include "_amr_utils/geometry_utils.pyx"


diff -r 5220b10d9105af3cff9abe85bae5a983f26fc2a7 -r 5303e3e582c693ae04ca3075ca53bbe87e224262 yt/utilities/setup.py
--- a/yt/utilities/setup.py
+++ b/yt/utilities/setup.py
@@ -2,101 +2,6 @@
 import setuptools
 import os, sys, os.path, glob
 
-def check_for_png():
-    # First up: HDF5_DIR in environment
-    if "PNG_DIR" in os.environ:
-        png_dir = os.environ["PNG_DIR"]
-        png_inc = os.path.join(png_dir, "include")
-        png_lib = os.path.join(png_dir, "lib")
-        print "PNG_LOCATION: PNG_DIR: %s, %s" % (png_inc, png_lib)
-        return (png_inc, png_lib)
-    # Next up, we try png.cfg
-    elif os.path.exists("png.cfg"):
-        png_dir = open("png.cfg").read().strip()
-        png_inc = os.path.join(png_dir, "include")
-        png_lib = os.path.join(png_dir, "lib")
-        print "PNG_LOCATION: png.cfg: %s, %s" % (png_inc, png_lib)
-        return (png_inc, png_lib)
-    # Now we see if ctypes can help us:
-    try:
-        import ctypes.util
-        png_libfile = ctypes.util.find_library("png")
-        if png_libfile is not None and os.path.isfile(png_libfile):
-            # Now we've gotten a library, but we'll need to figure out the
-            # includes if this is going to work.  It feels like there is a
-            # better way to pull off two directory names.
-            png_dir = os.path.dirname(os.path.dirname(png_libfile))
-            if os.path.isdir(os.path.join(png_dir, "include")) and \
-               os.path.isfile(os.path.join(png_dir, "include", "png.h")):
-                png_inc = os.path.join(png_dir, "include")
-                png_lib = os.path.join(png_dir, "lib")
-                print "PNG_LOCATION: png found in: %s, %s" % (png_inc, png_lib)
-                return png_inc, png_lib
-    except ImportError:
-        pass
-    # X11 is where it's located by default on OSX, although I am slightly
-    # reluctant to link against that one.
-    for png_dir in ["/usr/", "/usr/local/", "/usr/X11/"]:
-        if os.path.isfile(os.path.join(png_dir, "include", "png.h")):
-            if os.path.isdir(os.path.join(png_dir, "include")) and \
-               os.path.isfile(os.path.join(png_dir, "include", "png.h")):
-                png_inc = os.path.join(png_dir, "include")
-                png_lib = os.path.join(png_dir, "lib")
-                print "PNG_LOCATION: png found in: %s, %s" % (png_inc, png_lib)
-                return png_inc, png_lib
-    print "Reading png location from png.cfg failed."
-    print "Please place the base directory of your png install in png.cfg and restart."
-    print "(ex: \"echo '/usr/local/' > png.cfg\" )"
-    sys.exit(1)
-
-def check_for_freetype():
-    # First up: environment
-    if "FTYPE_DIR" in os.environ:
-        freetype_dir = os.environ["FTYPE_DIR"]
-        freetype_inc = os.path.join(freetype_dir, "include")
-        freetype_lib = os.path.join(freetype_dir, "lib")
-        print "FTYPE_LOCATION: FTYPE_DIR: %s, %s" % (freetype_inc, freetype_lib)
-        return (freetype_inc, freetype_lib)
-    # Next up, we try freetype.cfg
-    elif os.path.exists("freetype.cfg"):
-        freetype_dir = open("freetype.cfg").read().strip()
-        freetype_inc = os.path.join(freetype_dir, "include")
-        freetype_lib = os.path.join(freetype_dir, "lib")
-        print "FTYPE_LOCATION: freetype.cfg: %s, %s" % (freetype_inc, freetype_lib)
-        return (freetype_inc, freetype_lib)
-    # Now we see if ctypes can help us:
-    try:
-        import ctypes.util
-        freetype_libfile = ctypes.util.find_library("freetype")
-        if freetype_libfile is not None and os.path.isfile(freetype_libfile):
-            # Now we've gotten a library, but we'll need to figure out the
-            # includes if this is going to work.  It feels like there is a
-            # better way to pull off two directory names.
-            freetype_dir = os.path.dirname(os.path.dirname(freetype_libfile))
-            if os.path.isdir(os.path.join(freetype_dir, "include")) and \
-               os.path.isfile(os.path.join(freetype_dir, "include", "ft2build.h")):
-                freetype_inc = os.path.join(freetype_dir, "include")
-                freetype_lib = os.path.join(freetype_dir, "lib")
-                print "FTYPE_LOCATION: freetype found in: %s, %s" % (freetype_inc, freetype_lib)
-                return freetype_inc, freetype_lib
-    except ImportError:
-        pass
-    # X11 is where it's located by default on OSX, although I am slightly
-    # reluctant to link against that one.
-    for freetype_dir in ["/usr/", "/usr/local/", "/usr/X11/"]:
-        if os.path.isfile(os.path.join(freetype_dir, "include", "ft2build.h")):
-            if os.path.isdir(os.path.join(freetype_dir, "include")) and \
-               os.path.isfile(os.path.join(freetype_dir, "include", "ft2build.h")):
-                freetype_inc = os.path.join(freetype_dir, "include")
-                freetype_lib = os.path.join(freetype_dir, "lib")
-                print "FTYPE_LOCATION: freetype found in: %s, %s" % (freetype_inc, freetype_lib)
-                return freetype_inc, freetype_lib
-    print "Reading freetype location from freetype.cfg failed."
-    print "Please place the base directory of your freetype install in freetype.cfg and restart."
-    print "(ex: \"echo '/usr/local/' > freetype.cfg\" )"
-    print "You can locate this by looking for the file ft2build.h"
-    sys.exit(1)
-
 def check_for_hdf5():
     # First up: HDF5_DIR in environment
     if "HDF5_DIR" in os.environ:
@@ -137,12 +42,6 @@
 def configuration(parent_package='',top_path=None):
     from numpy.distutils.misc_util import Configuration
     config = Configuration('utilities',parent_package,top_path)
-    png_inc, png_lib = check_for_png()
-    freetype_inc, freetype_lib = check_for_freetype()
-    # Because setjmp.h is included by lots of things, and because libpng hasn't
-    # always properly checked its header files (see
-    # https://bugzilla.redhat.com/show_bug.cgi?id=494579 ) we simply disable
-    # support for setjmp.
     config.add_subpackage("amr_kdtree")
     config.add_subpackage("answer_testing")
     config.add_subpackage("delaunay") # From SciPy, written by Robert Kern
@@ -150,6 +49,7 @@
     config.add_data_files(('kdtree', ['kdtree/fKDpy.so',]))
     config.add_subpackage("spatial")
     config.add_subpackage("parallel_tools")
+    config.add_subpackage("_amr_utils")
     config.add_extension("data_point_utilities",
                 "yt/utilities/data_point_utilities.c", libraries=["m"])
     hdf5_inc, hdf5_lib = check_for_hdf5()
@@ -159,25 +59,6 @@
                          define_macros=[("H5_USE_16_API",True)],
                          libraries=["m","hdf5"],
                          library_dirs=library_dirs, include_dirs=include_dirs)
-    config.add_extension("amr_utils", 
-        ["yt/utilities/amr_utils.pyx",
-         "yt/utilities/_amr_utils/FixedInterpolator.c",
-         "yt/utilities/_amr_utils/kdtree.c",
-         "yt/utilities/_amr_utils/union_find.c"] +
-         glob.glob("yt/utilities/_amr_utils/healpix_*.c"), 
-        define_macros=[("PNG_SETJMP_NOT_SUPPORTED", True)],
-        include_dirs=["yt/utilities/_amr_utils/", png_inc,
-                      freetype_inc, os.path.join(freetype_inc, "freetype2")],
-        library_dirs=[png_lib, freetype_lib],
-        libraries=["m", "png", "freetype"],
-        depends=glob.glob("yt/utilities/_amr_utils/*.pyx") +
-                glob.glob("yt/utilities/_amr_utils/*.h") +
-                glob.glob("yt/utilities/_amr_utils/*.c"),
-        )
-    #config.add_extension("voropp",
-    #    ["yt/utilities/voropp.pyx"],
-    #    language="c++",
-    #    include_dirs=["yt/utilities/voro++"])
     config.add_extension("libconfig_wrapper", 
         ["yt/utilities/libconfig_wrapper.pyx"] +
          glob.glob("yt/utilities/_libconfig/*.c"), 


diff -r 5220b10d9105af3cff9abe85bae5a983f26fc2a7 -r 5303e3e582c693ae04ca3075ca53bbe87e224262 yt/visualization/volume_rendering/transfer_functions.py
--- a/yt/visualization/volume_rendering/transfer_functions.py
+++ b/yt/visualization/volume_rendering/transfer_functions.py
@@ -61,6 +61,8 @@
         self.x_bounds = x_bounds
         self.x = na.linspace(x_bounds[0], x_bounds[1], nbins).astype('float64')
         self.y = na.zeros(nbins, dtype='float64')
+        self.grad_field = -1
+        self.light_source_v = self.light_source_c = na.zeros(3, 'float64')
 
     def add_gaussian(self, location, width, height):
         r"""Add a Gaussian distribution to the transfer function.
@@ -239,6 +241,8 @@
         self.weight_field_ids = [-1] * 6 # This correlates 
         self.field_table_ids = [0] * 6
         self.weight_table_ids = [-1] * 6
+        self.grad_field = -1
+        self.light_source_v = self.light_source_c = na.zeros(3, 'float64')
 
     def add_field_table(self, table, field_id, weight_field_id = -1,
                         weight_table_id = -1):



https://bitbucket.org/yt_analysis/yt/changeset/967d516c0683/
changeset:   967d516c0683
branch:      yt
user:        MatthewTurk
date:        2011-12-07 19:14:21
summary:     Reverted the VolumeIntegrator code back to the old version, and added a new
"grid_traversal" code.  This code actually compiles and includes what I believe
are all the components necessary to conduct an off-axis projection.
affected #:  5 files
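The new grid_traversal code keeps the traversal generic and pushes the physics into a per-cell sampler: the traversal reports which cell a ray is in and for how long (in the ray parameter t), and an off-axis projection is then just a sampler that accumulates field value times path length. A toy sketch of that split, using made-up cell intervals rather than the changeset's actual VolumeContainer/ImageSampler machinery:

    import numpy as np

    def project(segments, field, ray_length):
        # segments: (cell_index, enter_t, exit_t) tuples, as a traversal
        # like integrate_ray would produce for one ray.
        total = 0.0
        for ci, t0, t1 in segments:
            total += field[ci] * (t1 - t0) * ray_length
        return total

    field = np.full((4, 4, 4), 2.0)                            # uniform field
    segments = [((0, 0, 0), 0.0, 0.5), ((1, 0, 0), 0.5, 1.0)]  # made up
    print(project(segments, field, ray_length=1.0))            # -> 2.0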

diff -r 5303e3e582c693ae04ca3075ca53bbe87e224262 -r 967d516c0683a82a202e3da97e3014b0786e8fed yt/utilities/_amr_utils/VolumeIntegrator.pyx
--- a/yt/utilities/_amr_utils/VolumeIntegrator.pyx
+++ b/yt/utilities/_amr_utils/VolumeIntegrator.pyx
@@ -29,8 +29,31 @@
 cimport kdtree_utils
 cimport healpix_interface
 from stdlib cimport malloc, free, abs
-from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip
-from ray_handling cimport integrate_ray, ray_sampler
+
+cdef inline int imax(int i0, int i1):
+    if i0 > i1: return i0
+    return i1
+
+cdef inline np.float64_t fmax(np.float64_t f0, np.float64_t f1):
+    if f0 > f1: return f0
+    return f1
+
+cdef inline int imin(int i0, int i1):
+    if i0 < i1: return i0
+    return i1
+
+cdef inline np.float64_t fmin(np.float64_t f0, np.float64_t f1):
+    if f0 < f1: return f0
+    return f1
+
+cdef inline int iclip(int i, int a, int b):
+    if i < a: return a
+    if i > b: return b
+    return i
+
+cdef inline np.float64_t fclip(np.float64_t f,
+                      np.float64_t a, np.float64_t b):
+    return fmin(fmax(f, a), b)
 
 cdef extern from "math.h":
     double exp(double x)
@@ -288,9 +311,6 @@
     cdef int n_fields
     cdef int n_field_tables
     cdef public int ns
-    cdef int grad
-    cdef np.float64_t light_source_v[3]
-    cdef np.float64_t light_source_c[3]
 
     # These are the field tables and their affiliated storage.
     # We have one field_id for every table.  Note that a single field can
@@ -345,19 +365,13 @@
         for i in range(6):
             self.field_table_ids[i] = tf_obj.field_table_ids[i]
             #print "Channel", i, "corresponds to", self.field_table_ids[i]
-
-        self.grad = tf_obj.grad_field
-        for i in range(3):
-            self.light_source_v[i] = tf_obj.light_source_v[i]
-            self.light_source_c[i] = tf_obj.light_source_c[i]
             
     @cython.boundscheck(False)
     @cython.wraparound(False)
-    @cython.cdivision(True)
     cdef void eval_transfer(self, np.float64_t dt, np.float64_t *dvs,
                                   np.float64_t *rgba, np.float64_t *grad):
         cdef int i, fid, use
-        cdef np.float64_t ta, tf, istorage[6], trgba[6], dot_prod, normalize
+        cdef np.float64_t ta, tf, istorage[6], trgba[6], dot_prod
         # NOTE: We now disable this.  I have left it to ease the process of
         # potentially, one day, re-including it.
         #use = 0
@@ -377,13 +391,6 @@
         for i in range(6):
             trgba[i] = istorage[self.field_table_ids[i]]
             #print i, trgba[i],
-        if self.grad != -1:
-            dot_prod = 0.0
-            for i in range(3):
-                dot_prod += grad[i] * self.light_source_v[i]
-            if dot_prod < 0: dot_prod = 0.0
-            for i in range(3):
-                trgba[i] *= dot_prod * self.light_source_c[i]
         #print
         # A few words on opacity.  We're going to be integrating equation 1.23
         # from Rybicki & Lightman.  dI_\nu / ds = -\alpha_\nu I_\nu + j_\nu
@@ -482,13 +489,6 @@
 
 cdef struct AdaptiveRayPacket
 
-cdef class PartitionedGrid
-
-cdef struct VolumeRendererData:
-    np.float64_t *rgba
-    TransferFunctionProxy tf
-    PartitionedGrid pg
-
 cdef class PartitionedGrid:
     cdef public object my_data
     cdef public object LeftEdge
@@ -604,21 +604,186 @@
                     if temp > extrema[3]: extrema[3] = temp
         #print extrema[0], extrema[1], extrema[2], extrema[3]
 
-    cdef int integrate_ray(np.float64_t left_edge[3],
-                           np.float64_t right_edge[3],
-                           np.float64_t dds[3],
-                           np.float64_t idds[3],
-                           int dims[3],
-                           np.float64_t v_pos[3],
-                           np.float64_t v_dir[3],
-                           np.float64_t rgba[4],
-                           TransferFunctionProxy tf,
-                           np.float64_t *return_t = NULL,
-                           np.float64_t enter_t = -1.0):
-        integrate_ray(self.left_edge, self.right_edge,
-                      self.dds, self.idds, self.dims, v_pos,
-                      v_dir, rgba, tf, return_t, enter_t,
-                      self.sample_value)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    cdef int integrate_ray(self, np.float64_t v_pos[3],
+                                 np.float64_t v_dir[3],
+                                 np.float64_t rgba[4],
+                                 TransferFunctionProxy tf,
+                                 np.float64_t *return_t = NULL,
+                                 np.float64_t enter_t = -1.0):
+        cdef int cur_ind[3], step[3], x, y, i, n, flat_ind, hit, direction
+        cdef np.float64_t intersect_t = 1.0
+        cdef np.float64_t iv_dir[3]
+        cdef np.float64_t intersect[3], tmax[3], tdelta[3]
+        cdef np.float64_t dist, alpha, dt, exit_t
+        cdef np.float64_t tr, tl, temp_x, temp_y, dv
+        for i in range(3):
+            if (v_dir[i] < 0):
+                step[i] = -1
+            elif (v_dir[i] == 0):
+                step[i] = 1
+                tmax[i] = 1e60
+                iv_dir[i] = 1e60
+                tdelta[i] = 1e-60
+                continue
+            else:
+                step[i] = 1
+            x = (i+1) % 3
+            y = (i+2) % 3
+            iv_dir[i] = 1.0/v_dir[i]
+            tl = (self.left_edge[i] - v_pos[i])*iv_dir[i]
+            temp_x = (v_pos[x] + tl*v_dir[x])
+            temp_y = (v_pos[y] + tl*v_dir[y])
+            if self.left_edge[x] <= temp_x and temp_x <= self.right_edge[x] and \
+               self.left_edge[y] <= temp_y and temp_y <= self.right_edge[y] and \
+               0.0 <= tl and tl < intersect_t:
+                direction = i
+                intersect_t = tl
+            tr = (self.right_edge[i] - v_pos[i])*iv_dir[i]
+            temp_x = (v_pos[x] + tr*v_dir[x])
+            temp_y = (v_pos[y] + tr*v_dir[y])
+            if self.left_edge[x] <= temp_x and temp_x <= self.right_edge[x] and \
+               self.left_edge[y] <= temp_y and temp_y <= self.right_edge[y] and \
+               0.0 <= tr and tr < intersect_t:
+                direction = i
+                intersect_t = tr
+        if self.left_edge[0] <= v_pos[0] and v_pos[0] <= self.right_edge[0] and \
+           self.left_edge[1] <= v_pos[1] and v_pos[1] <= self.right_edge[1] and \
+           self.left_edge[2] <= v_pos[2] and v_pos[2] <= self.right_edge[2]:
+            intersect_t = 0.0
+        if enter_t >= 0.0: intersect_t = enter_t
+        if not ((0.0 <= intersect_t) and (intersect_t < 1.0)): return 0
+        for i in range(3):
+            intersect[i] = v_pos[i] + intersect_t * v_dir[i]
+            cur_ind[i] = <int> floor((intersect[i] +
+                                      step[i]*1e-8*self.dds[i] -
+                                      self.left_edge[i])*self.idds[i])
+            tmax[i] = (((cur_ind[i]+step[i])*self.dds[i])+
+                        self.left_edge[i]-v_pos[i])*iv_dir[i]
+            # This deals with the asymmetry in having our indices refer to the
+            # left edge of a cell, but the right edge of the brick being one
+            # extra zone out.
+            if cur_ind[i] == self.dims[i] and step[i] < 0:
+                cur_ind[i] = self.dims[i] - 1
+            if cur_ind[i] < 0 or cur_ind[i] >= self.dims[i]: return 0
+            if step[i] > 0:
+                tmax[i] = (((cur_ind[i]+1)*self.dds[i])
+                            +self.left_edge[i]-v_pos[i])*iv_dir[i]
+            if step[i] < 0:
+                tmax[i] = (((cur_ind[i]+0)*self.dds[i])
+                            +self.left_edge[i]-v_pos[i])*iv_dir[i]
+            tdelta[i] = (self.dds[i]*iv_dir[i])
+            if tdelta[i] < 0: tdelta[i] *= -1
+        # We have to jumpstart our calculation
+        enter_t = intersect_t
+        hit = 0
+        while 1:
+            # dims here is one less than the dimensions of the data,
+            # but we are tracing on the grid, not on the data...
+            if (not (0 <= cur_ind[0] < self.dims[0])) or \
+               (not (0 <= cur_ind[1] < self.dims[1])) or \
+               (not (0 <= cur_ind[2] < self.dims[2])):
+                break
+            hit += 1
+            if tmax[0] < tmax[1]:
+                if tmax[0] < tmax[2]:
+                    exit_t = fmin(tmax[0], 1.0)
+                    self.sample_values(v_pos, v_dir, enter_t, exit_t, cur_ind,
+                                       rgba, tf)
+                    cur_ind[0] += step[0]
+                    enter_t = tmax[0]
+                    tmax[0] += tdelta[0]
+                else:
+                    exit_t = fmin(tmax[2], 1.0)
+                    self.sample_values(v_pos, v_dir, enter_t, exit_t, cur_ind,
+                                       rgba, tf)
+                    cur_ind[2] += step[2]
+                    enter_t = tmax[2]
+                    tmax[2] += tdelta[2]
+            else:
+                if tmax[1] < tmax[2]:
+                    exit_t = fmin(tmax[1], 1.0)
+                    self.sample_values(v_pos, v_dir, enter_t, exit_t, cur_ind,
+                                       rgba, tf)
+                    cur_ind[1] += step[1]
+                    enter_t = tmax[1]
+                    tmax[1] += tdelta[1]
+                else:
+                    exit_t = fmin(tmax[2], 1.0)
+                    self.sample_values(v_pos, v_dir, enter_t, exit_t, cur_ind,
+                                       rgba, tf)
+                    cur_ind[2] += step[2]
+                    enter_t = tmax[2]
+                    tmax[2] += tdelta[2]
+            if enter_t >= 1.0: break
+        if return_t != NULL: return_t[0] = exit_t
+        return hit
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    cdef void sample_values(self,
+                            np.float64_t v_pos[3],
+                            np.float64_t v_dir[3],
+                            np.float64_t enter_t,
+                            np.float64_t exit_t,
+                            int ci[3],
+                            np.float64_t *rgba,
+                            TransferFunctionProxy tf):
+        cdef np.float64_t cp[3], dp[3], pos[3], dt, t, dv
+        cdef np.float64_t grad[3], ds[3]
+        cdef np.float64_t local_dds[3], cell_left[3]
+        grad[0] = grad[1] = grad[2] = 0.0
+        cdef int dti, i
+        cdef kdtree_utils.kdres *ballq = NULL
+        dt = (exit_t - enter_t) / tf.ns # 4 samples should be dt=0.25
+        cdef int offset = ci[0] * (self.dims[1] + 1) * (self.dims[2] + 1) \
+                        + ci[1] * (self.dims[2] + 1) + ci[2]
+        # The initial and final values can be linearly interpolated between; so
+        # we just have to calculate our initial and final values.
+        cdef np.float64_t slopes[6]
+        for i in range(3):
+            dp[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
+            dp[i] -= ci[i] * self.dds[i] + self.left_edge[i]
+            dp[i] *= self.idds[i]
+            ds[i] = v_dir[i] * self.idds[i] * dt
+        for i in range(self.n_fields):
+            slopes[i] = offset_interpolate(self.dims, dp,
+                            self.data[i] + offset)
+        for i in range(3):
+            dp[i] += ds[i] * tf.ns
+        cdef np.float64_t temp
+        for i in range(self.n_fields):
+            temp = slopes[i]
+            slopes[i] -= offset_interpolate(self.dims, dp,
+                             self.data[i] + offset)
+            slopes[i] *= -1.0/tf.ns
+            self.dvs[i] = temp
+        if self.star_list != NULL:
+            for i in range(3):
+                cell_left[i] = ci[i] * self.dds[i] + self.left_edge[i]
+                # this gets us dp as the current first sample position
+                pos[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
+                dp[i] -= tf.ns * ds[i]
+                local_dds[i] = v_dir[i] * dt
+            ballq = kdtree_utils.kd_nearest_range3(
+                self.star_list, cell_left[0] + self.dds[0]*0.5,
+                                cell_left[1] + self.dds[1]*0.5,
+                                cell_left[2] + self.dds[2]*0.5,
+                                self.star_er + 0.9*self.dds[0])
+                                            # ~0.866 + a bit
+        for dti in range(tf.ns): 
+            #if (dv < tf.x_bounds[0]) or (dv > tf.x_bounds[1]):
+            #    continue
+            if self.star_list != NULL:
+                self.add_stars(ballq, dt, pos, rgba)
+                for i in range(3):
+                    dp[i] += ds[i]
+                    pos[i] += local_dds[i]
+            tf.eval_transfer(dt, self.dvs, rgba, grad)
+            for i in range(self.n_fields):
+                self.dvs[i] += slopes[i]
+        if ballq != NULL: kdtree_utils.kd_res_free(ballq)
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -1846,72 +2011,3 @@
 # From Enzo:
 #   dOmega = 4 pi r^2/Nrays
 #   if (dOmega > RaysPerCell * dx^2) then split
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-cdef void pg_sample_values(np.float64_t v_pos[3],
-                        np.float64_t v_dir[3],
-                        np.float64_t enter_t,
-                        np.float64_t exit_t,
-                        int ci[3],
-                        void *rdata):
-    cdef VolumeRendererData *dd = <VolumeRendererData*> rdata
-    cdef PartitionedGrid self = dd.pg
-    cdef np.float64_t cp[3], dp[3], pos[3], dt, t, dv
-    cdef np.float64_t grad[3], ds[3]
-    cdef np.float64_t local_dds[3], cell_left[3]
-    grad[0] = grad[1] = grad[2] = 0.0
-    cdef int dti, i
-    cdef kdtree_utils.kdres *ballq = NULL
-    dt = (exit_t - enter_t) / tf.ns # 4 samples should be dt=0.25
-    cdef int offset = ci[0] * (self.dims[1] + 1) * (self.dims[2] + 1) \
-                    + ci[1] * (self.dims[2] + 1) + ci[2]
-    # The initial and final values can be linearly interpolated between; so
-    # we just have to calculate our initial and final values.
-    cdef np.float64_t slopes[6]
-    for i in range(3):
-        dp[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
-        dp[i] -= ci[i] * self.dds[i] + self.left_edge[i]
-        dp[i] *= self.idds[i]
-        ds[i] = v_dir[i] * self.idds[i] * dt
-    for i in range(self.n_fields):
-        slopes[i] = offset_interpolate(self.dims, dp,
-                        self.data[i] + offset)
-        if tf.grad == i:
-            eval_gradient(self.dims, dp, self.data[i] + offset,
-                          grad)
-    for i in range(3):
-        dp[i] += ds[i] * tf.ns
-    cdef np.float64_t temp
-    for i in range(self.n_fields):
-        temp = slopes[i]
-        slopes[i] -= offset_interpolate(self.dims, dp,
-                         self.data[i] + offset)
-        slopes[i] *= -1.0/tf.ns
-        self.dvs[i] = temp
-    if self.star_list != NULL:
-        for i in range(3):
-            cell_left[i] = ci[i] * self.dds[i] + self.left_edge[i]
-            # this gets us dp as the current first sample position
-            pos[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
-            dp[i] -= tf.ns * ds[i]
-            local_dds[i] = v_dir[i] * dt
-        ballq = kdtree_utils.kd_nearest_range3(
-            self.star_list, cell_left[0] + self.dds[0]*0.5,
-                            cell_left[1] + self.dds[1]*0.5,
-                            cell_left[2] + self.dds[2]*0.5,
-                            self.star_er + 0.9*self.dds[0])
-                                        # ~0.866 + a bit
-    for dti in range(tf.ns): 
-        #if (dv < tf.x_bounds[0]) or (dv > tf.x_bounds[1]):
-        #    continue
-        if self.star_list != NULL:
-            self.add_stars(ballq, dt, pos, rgba)
-            for i in range(3):
-                dp[i] += ds[i]
-                pos[i] += local_dds[i]
-        tf.eval_transfer(dt, self.dvs, rgba, grad)
-        for i in range(self.n_fields):
-            self.dvs[i] += slopes[i]
-    if ballq != NULL: kdtree_utils.kd_res_free(ballq)
-


diff -r 5303e3e582c693ae04ca3075ca53bbe87e224262 -r 967d516c0683a82a202e3da97e3014b0786e8fed yt/utilities/_amr_utils/fp_utils.pxd
--- a/yt/utilities/_amr_utils/fp_utils.pxd
+++ b/yt/utilities/_amr_utils/fp_utils.pxd
@@ -26,28 +26,28 @@
 cimport numpy as np
 cimport cython
 
-cdef inline int imax(int i0, int i1):
+cdef inline int imax(int i0, int i1) nogil:
     if i0 > i1: return i0
     return i1
 
-cdef inline np.float64_t fmax(np.float64_t f0, np.float64_t f1):
+cdef inline np.float64_t fmax(np.float64_t f0, np.float64_t f1) nogil:
     if f0 > f1: return f0
     return f1
 
-cdef inline int imin(int i0, int i1):
+cdef inline int imin(int i0, int i1) nogil:
     if i0 < i1: return i0
     return i1
 
-cdef inline np.float64_t fmin(np.float64_t f0, np.float64_t f1):
+cdef inline np.float64_t fmin(np.float64_t f0, np.float64_t f1) nogil:
     if f0 < f1: return f0
     return f1
 
-cdef inline int iclip(int i, int a, int b):
+cdef inline int iclip(int i, int a, int b) nogil:
     if i < a: return a
     if i > b: return b
     return i
 
 cdef inline np.float64_t fclip(np.float64_t f,
-                      np.float64_t a, np.float64_t b):
+                      np.float64_t a, np.float64_t b) nogil:
     return fmin(fmax(f, a), b)
 


diff -r 5303e3e582c693ae04ca3075ca53bbe87e224262 -r 967d516c0683a82a202e3da97e3014b0786e8fed yt/utilities/_amr_utils/grid_traversal.pyx
--- /dev/null
+++ b/yt/utilities/_amr_utils/grid_traversal.pyx
@@ -0,0 +1,519 @@
+"""
+Simple integrators for the radiative transfer equation
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2009 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+cimport numpy as np
+cimport cython
+cimport kdtree_utils
+cimport healpix_interface
+from stdlib cimport malloc, free, abs
+from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip
+
+cdef extern from "math.h":
+    double exp(double x) nogil
+    float expf(float x) nogil
+    long double expl(long double x) nogil
+    double floor(double x) nogil
+    double ceil(double x) nogil
+    double fmod(double x, double y) nogil
+    double log2(double x) nogil
+    long int lrint(double x) nogil
+    double fabs(double x) nogil
+
+cdef struct VolumeContainer
+ctypedef void sample_function(
+                VolumeContainer *vc,
+                np.float64_t v_pos[3],
+                np.float64_t v_dir[3],
+                np.float64_t enter_t,
+                np.float64_t exit_t,
+                int index[3],
+                void *data)
+
+cdef extern from "FixedInterpolator.h":
+    np.float64_t fast_interpolate(int ds[3], int ci[3], np.float64_t dp[3],
+                                  np.float64_t *data)
+    np.float64_t offset_interpolate(int ds[3], np.float64_t dp[3], np.float64_t *data)
+    np.float64_t trilinear_interpolate(int ds[3], int ci[3], np.float64_t dp[3],
+                                       np.float64_t *data)
+    void eval_gradient(int ds[3], np.float64_t dp[3], np.float64_t *data,
+                       np.float64_t grad[3])
+    void offset_fill(int *ds, np.float64_t *data, np.float64_t *gridval)
+    void vertex_interp(np.float64_t v1, np.float64_t v2, np.float64_t isovalue,
+                       np.float64_t vl[3], np.float64_t dds[3],
+                       np.float64_t x, np.float64_t y, np.float64_t z,
+                       int vind1, int vind2)
+
+cdef struct VolumeContainer:
+    int n_fields
+    np.float64_t **data
+    np.float64_t left_edge[3]
+    np.float64_t right_edge[3]
+    np.float64_t dds[3]
+    np.float64_t idds[3]
+    int dims[3]
+
+cdef class PartitionedGrid:
+    cdef public object my_data
+    cdef public object LeftEdge
+    cdef public object RightEdge
+    cdef VolumeContainer *container
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    def __cinit__(self,
+                  int parent_grid_id, data,
+                  np.ndarray[np.float64_t, ndim=1] left_edge,
+                  np.ndarray[np.float64_t, ndim=1] right_edge,
+                  np.ndarray[np.int64_t, ndim=1] dims):
+        # The data is likely brought in via a slice, so we copy it
+        cdef np.ndarray[np.float64_t, ndim=3] tdata
+        self.LeftEdge = left_edge
+        self.RightEdge = right_edge
+        self.container = <VolumeContainer *> \
+            malloc(sizeof(VolumeContainer))
+        cdef VolumeContainer *c = self.container # convenience
+        cdef int n_fields = len(data)
+        for i in range(3):
+            c.left_edge[i] = left_edge[i]
+            c.right_edge[i] = right_edge[i]
+            c.dims[i] = dims[i]
+            c.dds[i] = (self.right_edge[i] - self.left_edge[i])/dims[i]
+            c.idds[i] = 1.0/self.dds[i]
+        self.my_data = data
+        c.data = <np.float64_t **> malloc(sizeof(np.float64_t*) * n_fields)
+        for d in data:
+            tdata = d
+            c.data[i] = <np.float64_t *> tdata.data
+
+    def __dealloc__(self):
+        cdef int n_fields = len(self.my_data)
+        # The data fields are not owned by the container, they are owned by us!
+        # So we don't need to deallocate them.
+        free(self.container.data)
+        free(self.container)
+
+cdef struct ImageContainer:
+    np.float64_t *vp_pos, *vp_dir, *center, *image,
+    np.float64_t pdx, pdy, bounds[4]
+    int nv[2]
+    int vp_strides[3]
+    int im_strides[3]
+    int vd_strides[3]
+    np.float64_t *x_vec, *y_vec
+
+cdef struct ImageAccumulator:
+    np.float64_t rgba[4]
+
+cdef class ImageSampler:
+    cdef ImageContainer *image
+    cdef sample_function *sampler
+    cdef public object avp_pos, avp_dir, acenter, aimage, ax_vec, ay_vec
+    def __cinit__(self, 
+                  np.ndarray[np.float64_t, ndim=3] vp_pos,
+                  np.ndarray vp_dir,
+                  np.ndarray[np.float64_t, ndim=1] center,
+                  bounds,
+                  np.ndarray[np.float64_t, ndim=3] image,
+                  np.ndarray[np.float64_t, ndim=1] x_vec,
+                  np.ndarray[np.float64_t, ndim=1] y_vec):
+        self.image = <ImageContainer *> malloc(sizeof(ImageContainer))
+        cdef ImageContainer *imagec = self.image
+        self.sampler = NULL
+        cdef int i, j
+        # These assignments are so we can track the objects and prevent their
+        # de-allocation from reference counts.
+        self.avp_pos = vp_pos
+        self.avp_dir = vp_dir
+        self.acenter = center
+        self.aimage = image
+        self.ax_vec = x_vec
+        self.ay_vec = y_vec
+        imagec.vp_pos = <np.float64_t *> vp_pos.data
+        imagec.vp_dir = <np.float64_t *> vp_dir.data
+        imagec.center = <np.float64_t *> center.data
+        imagec.image = <np.float64_t *> image.data
+        imagec.x_vec = <np.float64_t *> x_vec.data
+        imagec.y_vec = <np.float64_t *> y_vec.data
+        imagec.nv[0] = vp_pos.shape[0]
+        imagec.nv[1] = vp_pos.shape[1]
+        for i in range(4): image.bounds[i] = bounds[i]
+        imagec.pdx = (self.bounds[1] - self.bounds[0])/self.nv[0]
+        imagec.pdy = (self.bounds[3] - self.bounds[2])/self.nv[1]
+        for i in range(3):
+            imagec.vp_strides[i] = vp_pos.strides[i] / 8
+            imagec.im_strides[i] = image.strides[i] / 8
+        if vp_dir.ndim > 1:
+            for i in range(3):
+                imagec.vd_strides[i] = vp_dir.strides[i] / 8
+        else:
+            imagec.vd_strides[0] = self.vd_strides[1] = self.vd_strides[2] = -1
+        self.setup()
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    cdef void get_start_stop(self, np.float64_t *ex, int *rv):
+        # Extrema need to be re-centered
+        cdef np.float64_t cx, cy
+        cdef ImageContainer *im = self.image
+        cdef int i
+        cx = cy = 0.0
+        for i in range(3):
+            cx += im.center[i] * im.x_vec[i]
+            cy += im.center[i] * im.y_vec[i]
+        rv[0] = lrint((ex[0] - cx - im.bounds[0])/im.pdx)
+        rv[1] = rv[0] + lrint((ex[1] - ex[0])/im.pdx)
+        rv[2] = lrint((ex[2] - cy - im.bounds[2])/im.pdy)
+        rv[3] = rv[2] + lrint((ex[3] - ex[2])/im.pdy)
+
+    cdef inline void copy_into(self, np.float64_t *fv, np.float64_t *tv,
+                        int i, int j, int nk, int strides[3]) nogil:
+        # We know the first two dimensions of our from-vector, and our
+        # to-vector is flat and 'ni' long
+        cdef int k
+        cdef int offset = strides[0] * i + strides[1] * j
+        for k in range(nk):
+            tv[k] = fv[offset + k]
+
+    cdef inline void copy_back(self, np.float64_t *fv, np.float64_t *tv,
+                        int i, int j, int nk, int strides[3]) nogil:
+        cdef int k
+        cdef int offset = strides[0] * i + strides[1] * j
+        for k in range(nk):
+            tv[offset + k] = fv[k]
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    cdef void calculate_extent(self, np.float64_t extrema[4],
+                               VolumeContainer *vc) nogil:
+        # We do this for all eight corners
+        cdef np.float64_t *edges[2], temp
+        edges[0] = vc.left_edge
+        edges[1] = vc.right_edge
+        extrema[0] = extrema[2] = 1e300; extrema[1] = extrema[3] = -1e300
+        cdef int i, j, k
+        for i in range(2):
+            for j in range(2):
+                for k in range(2):
+                    # This should rotate it into the vector plane
+                    temp  = edges[i][0] * self.image.x_vec[0]
+                    temp += edges[j][1] * self.image.x_vec[1]
+                    temp += edges[k][2] * self.image.x_vec[2]
+                    if temp < extrema[0]: extrema[0] = temp
+                    if temp > extrema[1]: extrema[1] = temp
+                    temp  = edges[i][0] * self.image.y_vec[0]
+                    temp += edges[j][1] * self.image.y_vec[1]
+                    temp += edges[k][2] * self.image.y_vec[2]
+                    if temp < extrema[2]: extrema[2] = temp
+                    if temp > extrema[3]: extrema[3] = temp
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    def __call__(self, PartitionedGrid pg):
+        # This routine will iterate over all of the vectors and cast each in
+        # turn.  Might benefit from a more sophisticated intersection check,
+        # like http://courses.csusm.edu/cs697exz/ray_box.htm
+        cdef int vi, vj, hit, i, ni, nj, nn
+        cdef int iter[4]
+        cdef VolumeContainer *vc = pg.container
+        cdef ImageContainer *im = self.image
+        if self.sampler == NULL: raise RuntimeError
+        cdef np.float64_t v_pos[3], v_dir[3], rgba[6], extrema[4]
+        hit = 0
+        self.calculate_extent(extrema, vc)
+        self.get_start_stop(extrema, iter)
+        iter[0] = iclip(iter[0]-1, 0, im.nv[0])
+        iter[1] = iclip(iter[1]+1, 0, im.nv[0])
+        iter[2] = iclip(iter[2]-1, 0, im.nv[1])
+        iter[3] = iclip(iter[3]+1, 0, im.nv[1])
+        cdef ImageAccumulator idata
+        cdef void *data = <void *> &idata
+        if im.vd_strides[0] == -1:
+            for vi in range(iter[0], iter[1]):
+                for vj in range(iter[2], iter[3]):
+                    for i in range(4): idata.rgba[i] = 0.0
+                    self.copy_into(im.vp_pos, v_pos, vi, vj, 3, im.vp_strides)
+                    self.copy_into(im.image, idata.rgba, vi, vj, 3, im.im_strides)
+                    walk_volume(vc, v_pos, im.vp_dir, self.sampler, data)
+                    self.copy_back(idata.rgba, im.image, vi, vj, 3, im.im_strides)
+        else:
+            # If we do not have an orthographic projection, we have to cast all
+            # our rays (until we can get an extrema calculation...)
+            for vi in range(im.nv[0]):
+                for vj in range(im.nv[1]):
+                    for i in range(4): idata.rgba[i] = 0.0
+                    self.copy_into(im.vp_pos, v_pos, vi, vj, 3, im.vp_strides)
+                    self.copy_into(im.image, idata.rgba, vi, vj, 3, im.im_strides)
+                    self.copy_into(im.vp_dir, v_dir, vi, vj, 3, im.vd_strides)
+                    walk_volume(vc, v_pos, v_dir, self.sampler, data)
+                    self.copy_back(idata.rgba, im.image, vi, vj, 3, im.im_strides)
+        return hit
+
+cdef void projection_sampler(
+                 VolumeContainer *vc, 
+                 np.float64_t v_pos[3],
+                 np.float64_t v_dir[3],
+                 np.float64_t enter_t,
+                 np.float64_t exit_t,
+                 int index[3],
+                 void *data):
+    cdef ImageAccumulator *im = <ImageAccumulator *> data
+    cdef int i
+    cdef int di = (index[0]*(vc.dims[1])+index[1])*vc.dims[2]+index[2]
+    for i in range(imin(4, vc.n_fields)):
+        im.rgba[i] += vc.data[i][di]
+
+cdef class ProjectionSampler(ImageSampler):
+    def setup(self):
+        self.sampler = projection_sampler
+    
+cdef class GridFace:
+    cdef int direction
+    cdef public np.float64_t coord
+    cdef np.float64_t left_edge[3]
+    cdef np.float64_t right_edge[3]
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    def __init__(self, grid, int direction, int left):
+        self.direction = direction
+        if left == 1:
+            self.coord = grid.LeftEdge[direction]
+        else:
+            self.coord = grid.RightEdge[direction]
+        cdef int i
+        for i in range(3):
+            self.left_edge[i] = grid.LeftEdge[i]
+            self.right_edge[i] = grid.RightEdge[i]
+        self.left_edge[direction] = self.right_edge[direction] = self.coord
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    cdef int proj_overlap(self, np.float64_t *left_edge, np.float64_t *right_edge):
+        cdef int xax, yax
+        xax = (self.direction + 1) % 3
+        yax = (self.direction + 2) % 3
+        if left_edge[xax] >= self.right_edge[xax]: return 0
+        if right_edge[xax] <= self.left_edge[xax]: return 0
+        if left_edge[yax] >= self.right_edge[yax]: return 0
+        if right_edge[yax] <= self.left_edge[yax]: return 0
+        return 1
+
+cdef class ProtoPrism:
+    cdef np.float64_t left_edge[3]
+    cdef np.float64_t right_edge[3]
+    cdef public object LeftEdge
+    cdef public object RightEdge
+    cdef public object subgrid_faces
+    cdef public int parent_grid_id
+    def __cinit__(self, int parent_grid_id,
+                  np.ndarray[np.float64_t, ndim=1] left_edge,
+                  np.ndarray[np.float64_t, ndim=1] right_edge,
+                  subgrid_faces):
+        self.parent_grid_id = parent_grid_id
+        cdef int i
+        self.LeftEdge = left_edge
+        self.RightEdge = right_edge
+        for i in range(3):
+            self.left_edge[i] = left_edge[i]
+            self.right_edge[i] = right_edge[i]
+        self.subgrid_faces = subgrid_faces
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    def sweep(self, int direction = 0, int stack = 0):
+        cdef int i
+        cdef GridFace face
+        cdef np.float64_t proto_split[3]
+        for i in range(3): proto_split[i] = self.right_edge[i]
+        for face in self.subgrid_faces[direction]:
+            proto_split[direction] = face.coord
+            if proto_split[direction] <= self.left_edge[direction]:
+                continue
+            if proto_split[direction] == self.right_edge[direction]:
+                if stack == 2: return [self]
+                return self.sweep((direction + 1) % 3, stack + 1)
+            if face.proj_overlap(self.left_edge, proto_split) == 1:
+                left, right = self.split(proto_split, direction)
+                LC = left.sweep((direction + 1) % 3)
+                RC = right.sweep(direction)
+                return LC + RC
+        raise RuntimeError
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    cdef object split(self, np.float64_t *sp, int direction):
+        cdef int i
+        cdef np.ndarray split_left = self.LeftEdge.copy()
+        cdef np.ndarray split_right = self.RightEdge.copy()
+
+        for i in range(3): split_left[i] = self.right_edge[i]
+        split_left[direction] = sp[direction]
+        left = ProtoPrism(self.parent_grid_id, self.LeftEdge, split_left,
+                          self.subgrid_faces)
+
+        for i in range(3): split_right[i] = self.left_edge[i]
+        split_right[direction] = sp[direction]
+        right = ProtoPrism(self.parent_grid_id, split_right, self.RightEdge,
+                           self.subgrid_faces)
+
+        return (left, right)
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    def get_brick(self, np.ndarray[np.float64_t, ndim=1] grid_left_edge,
+                        np.ndarray[np.float64_t, ndim=1] grid_dds,
+                        child_mask):
+        # We get passed in the left edge, the dds (which gives dimensions) and
+        # the data, which is already vertex-centered.
+        cdef PartitionedGrid PG
+        cdef int li[3], ri[3], idims[3], i
+        for i in range(3):
+            li[i] = lrint((self.left_edge[i] - grid_left_edge[i])/grid_dds[i])
+            ri[i] = lrint((self.right_edge[i] - grid_left_edge[i])/grid_dds[i])
+            idims[i] = ri[i] - li[i]
+        if child_mask[li[0], li[1], li[2]] == 0: return []
+        cdef np.ndarray[np.int64_t, ndim=1] dims = np.empty(3, dtype='int64')
+        for i in range(3):
+            dims[i] = idims[i]
+        #cdef np.ndarray[np.float64_t, ndim=3] new_data
+        #new_data = data[li[0]:ri[0]+1,li[1]:ri[1]+1,li[2]:ri[2]+1].copy()
+        #PG = PartitionedGrid(self.parent_grid_id, new_data,
+        #                     self.LeftEdge, self.RightEdge, dims)
+        return ((li[0], ri[0]), (li[1], ri[1]), (li[2], ri[2]), dims)
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef int walk_volume(VolumeContainer *vc,
+                     np.float64_t v_pos[3],
+                     np.float64_t v_dir[3],
+                     sample_function *sampler,
+                     void *data,
+                     np.float64_t *return_t = NULL,
+                     np.float64_t enter_t = -1.0) nogil:
+    cdef int cur_ind[3], step[3], x, y, i, n, flat_ind, hit, direction
+    cdef np.float64_t intersect_t = 1.0
+    cdef np.float64_t iv_dir[3]
+    cdef np.float64_t intersect[3], tmax[3], tdelta[3]
+    cdef np.float64_t dist, alpha, dt, exit_t
+    cdef np.float64_t tr, tl, temp_x, temp_y, dv
+    for i in range(3):
+        if (v_dir[i] < 0):
+            step[i] = -1
+        elif (v_dir[i] == 0):
+            step[i] = 1
+            tmax[i] = 1e60
+            iv_dir[i] = 1e60
+            tdelta[i] = 1e-60
+            continue
+        else:
+            step[i] = 1
+        x = (i+1) % 3
+        y = (i+2) % 3
+        iv_dir[i] = 1.0/v_dir[i]
+        tl = (vc.left_edge[i] - v_pos[i])*iv_dir[i]
+        temp_x = (v_pos[x] + tl*v_dir[x])
+        temp_y = (v_pos[y] + tl*v_dir[y])
+        if vc.left_edge[x] <= temp_x and temp_x <= vc.right_edge[x] and \
+           vc.left_edge[y] <= temp_y and temp_y <= vc.right_edge[y] and \
+           0.0 <= tl and tl < intersect_t:
+            direction = i
+            intersect_t = tl
+        tr = (vc.right_edge[i] - v_pos[i])*iv_dir[i]
+        temp_x = (v_pos[x] + tr*v_dir[x])
+        temp_y = (v_pos[y] + tr*v_dir[y])
+        if vc.left_edge[x] <= temp_x and temp_x <= vc.right_edge[x] and \
+           vc.left_edge[y] <= temp_y and temp_y <= vc.right_edge[y] and \
+           0.0 <= tr and tr < intersect_t:
+            direction = i
+            intersect_t = tr
+    if vc.left_edge[0] <= v_pos[0] and v_pos[0] <= vc.right_edge[0] and \
+       vc.left_edge[1] <= v_pos[1] and v_pos[1] <= vc.right_edge[1] and \
+       vc.left_edge[2] <= v_pos[2] and v_pos[2] <= vc.right_edge[2]:
+        intersect_t = 0.0
+    if enter_t >= 0.0: intersect_t = enter_t
+    if not ((0.0 <= intersect_t) and (intersect_t < 1.0)): return 0
+    for i in range(3):
+        intersect[i] = v_pos[i] + intersect_t * v_dir[i]
+        cur_ind[i] = <int> floor((intersect[i] +
+                                  step[i]*1e-8*vc.dds[i] -
+                                  vc.left_edge[i])*vc.idds[i])
+        tmax[i] = (((cur_ind[i]+step[i])*vc.dds[i])+
+                    vc.left_edge[i]-v_pos[i])*iv_dir[i]
+        # This deals with the asymmetry in having our indices refer to the
+        # left edge of a cell, but the right edge of the brick being one
+        # extra zone out.
+        if cur_ind[i] == vc.dims[i] and step[i] < 0:
+            cur_ind[i] = vc.dims[i] - 1
+        if cur_ind[i] < 0 or cur_ind[i] >= vc.dims[i]: return 0
+        if step[i] > 0:
+            tmax[i] = (((cur_ind[i]+1)*vc.dds[i])
+                        +vc.left_edge[i]-v_pos[i])*iv_dir[i]
+        if step[i] < 0:
+            tmax[i] = (((cur_ind[i]+0)*vc.dds[i])
+                        +vc.left_edge[i]-v_pos[i])*iv_dir[i]
+        tdelta[i] = (vc.dds[i]*iv_dir[i])
+        if tdelta[i] < 0: tdelta[i] *= -1
+    # We have to jumpstart our calculation
+    enter_t = intersect_t
+    hit = 0
+    while 1:
+        # dims here is one less than the dimensions of the data,
+        # but we are tracing on the grid, not on the data...
+        if (not (0 <= cur_ind[0] < vc.dims[0])) or \
+           (not (0 <= cur_ind[1] < vc.dims[1])) or \
+           (not (0 <= cur_ind[2] < vc.dims[2])):
+            break
+        hit += 1
+        if tmax[0] < tmax[1]:
+            if tmax[0] < tmax[2]:
+                exit_t = fmin(tmax[0], 1.0)
+                sampler(vc, v_pos, v_dir, enter_t, exit_t, cur_ind, data)
+                cur_ind[0] += step[0]
+                enter_t = tmax[0]
+                tmax[0] += tdelta[0]
+            else:
+                exit_t = fmin(tmax[2], 1.0)
+                sampler(vc, v_pos, v_dir, enter_t, exit_t, cur_ind, data)
+                cur_ind[2] += step[2]
+                enter_t = tmax[2]
+                tmax[2] += tdelta[2]
+        else:
+            if tmax[1] < tmax[2]:
+                exit_t = fmin(tmax[1], 1.0)
+                sampler(vc, v_pos, v_dir, enter_t, exit_t, cur_ind, data)
+                cur_ind[1] += step[1]
+                enter_t = tmax[1]
+                tmax[1] += tdelta[1]
+            else:
+                exit_t = fmin(tmax[2], 1.0)
+                sampler(vc, v_pos, v_dir, enter_t, exit_t, cur_ind, data)
+                cur_ind[2] += step[2]
+                enter_t = tmax[2]
+                tmax[2] += tdelta[2]
+        if enter_t >= 1.0: break
+    if return_t != NULL: return_t[0] = exit_t
+    return hit
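
For orientation: the walk_volume routine added above is the standard incremental
voxel traversal (in the spirit of Amanatides & Woo). It clips the ray against the
brick, finds the first cell the ray enters, then repeatedly steps across whichever
cell face is hit next (the axis with the smallest tmax), calling the sampler once
per cell crossed. Below is a minimal pure-Python sketch of the same loop, written
only for illustration: it assumes a uniform brick and the names are invented, not
part of the committed Cython code.

    def walk_uniform_grid(left_edge, dds, dims, v_pos, v_dir,
                          enter_t, exit_t, sample):
        # Assumes the ray already intersects the brick between enter_t and exit_t.
        cur = [0] * 3; step = [0] * 3; tmax = [0.0] * 3; tdelta = [0.0] * 3
        for i in range(3):
            step[i] = 1 if v_dir[i] >= 0 else -1
            inv = 1.0 / v_dir[i] if v_dir[i] != 0 else 1e60
            x = v_pos[i] + enter_t * v_dir[i]            # entry point on this axis
            cur[i] = min(max(int((x - left_edge[i]) / dds[i]), 0), dims[i] - 1)
            nxt = left_edge[i] + (cur[i] + (step[i] > 0)) * dds[i]
            tmax[i] = (nxt - v_pos[i]) * inv             # t at which we leave this cell
            tdelta[i] = abs(dds[i] * inv)                # t needed to cross one cell
        t = enter_t
        while all(0 <= cur[i] < dims[i] for i in range(3)) and t < exit_t:
            axis = min(range(3), key=lambda i: tmax[i])  # next face the ray hits
            t_exit = min(tmax[axis], exit_t)
            sample(tuple(cur), t, t_exit)                # accumulate along this segment
            t = tmax[axis]
            cur[axis] += step[axis]
            tmax[axis] += tdelta[axis]

Precomputing tdelta is what keeps the inner loop down to one comparison and two
additions per cell, which is also why the routine can stay nogil and be driven from
the threaded loops added in the later changesets.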


diff -r 5303e3e582c693ae04ca3075ca53bbe87e224262 -r 967d516c0683a82a202e3da97e3014b0786e8fed yt/utilities/_amr_utils/ray_handling.pxd
--- a/yt/utilities/_amr_utils/ray_handling.pxd
+++ /dev/null
@@ -1,153 +0,0 @@
-"""
-General purpose ray casting
-
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: KIPAC/SLAC/Stanford
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2009 Matthew Turk.  All Rights Reserved.
-
-  This file is part of yt.
-
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
-
-
-cimport numpy as np
-from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip
-
-ctypedef void (*ray_sampler) (np.float64_t v_pos[3],
-                              np.float64_t v_dir[3],
-                              np.float64_t enter_t,
-                              np.float64_t exit_t,
-                              int ci[3],
-                              void *rdata)
-
-@cython.cdivision(True)
-@cython.boundscheck(False)
-@cython.wraparound(False)
-cdef int integrate_ray(np.float64_t left_edge[3],
-                       np.float64_t right_edge[3],
-                       np.float64_t dds[3],
-                       np.float64_t idds[3],
-                       int dims[3],
-                       np.float64_t v_pos[3],
-                       np.float64_t v_dir[3],
-                       np.float64_t *return_t,
-                       np.float64_t enter_t,
-                       void *rdata):
-    cdef int cur_ind[3], step[3], x, y, i, n, flat_ind, hit, direction
-    cdef np.float64_t intersect_t = 1.0
-    cdef np.float64_t iv_dir[3]
-    cdef np.float64_t intersect[3], tmax[3], tdelta[3]
-    cdef np.float64_t dist, alpha, dt, exit_t
-    cdef np.float64_t tr, tl, temp_x, temp_y, dv
-    for i in range(3):
-        if (v_dir[i] < 0):
-            step[i] = -1
-        elif (v_dir[i] == 0):
-            step[i] = 1
-            tmax[i] = 1e60
-            iv_dir[i] = 1e60
-            tdelta[i] = 1e-60
-            continue
-        else:
-            step[i] = 1
-        x = (i+1) % 3
-        y = (i+2) % 3
-        iv_dir[i] = 1.0/v_dir[i]
-        tl = (left_edge[i] - v_pos[i])*iv_dir[i]
-        temp_x = (v_pos[x] + tl*v_dir[x])
-        temp_y = (v_pos[y] + tl*v_dir[y])
-        if left_edge[x] <= temp_x and temp_x <= right_edge[x] and \
-           left_edge[y] <= temp_y and temp_y <= right_edge[y] and \
-           0.0 <= tl and tl < intersect_t:
-            direction = i
-            intersect_t = tl
-        tr = (right_edge[i] - v_pos[i])*iv_dir[i]
-        temp_x = (v_pos[x] + tr*v_dir[x])
-        temp_y = (v_pos[y] + tr*v_dir[y])
-        if left_edge[x] <= temp_x and temp_x <= right_edge[x] and \
-           left_edge[y] <= temp_y and temp_y <= right_edge[y] and \
-           0.0 <= tr and tr < intersect_t:
-            direction = i
-            intersect_t = tr
-    if left_edge[0] <= v_pos[0] and v_pos[0] <= right_edge[0] and \
-       left_edge[1] <= v_pos[1] and v_pos[1] <= right_edge[1] and \
-       left_edge[2] <= v_pos[2] and v_pos[2] <= right_edge[2]:
-        intersect_t = 0.0
-    if enter_t >= 0.0: intersect_t = enter_t
-    if not ((0.0 <= intersect_t) and (intersect_t < 1.0)): return 0
-    for i in range(3):
-        intersect[i] = v_pos[i] + intersect_t * v_dir[i]
-        cur_ind[i] = <int> floor((intersect[i] +
-                                  step[i]*1e-8*dds[i] -
-                                  left_edge[i])*idds[i])
-        tmax[i] = (((cur_ind[i]+step[i])*dds[i])+
-                    left_edge[i]-v_pos[i])*iv_dir[i]
-        # This deals with the asymmetry in having our indices refer to the
-        # left edge of a cell, but the right edge of the brick being one
-        # extra zone out.
-        if cur_ind[i] == dims[i] and step[i] < 0:
-            cur_ind[i] = dims[i] - 1
-        if cur_ind[i] < 0 or cur_ind[i] >= dims[i]: return 0
-        if step[i] > 0:
-            tmax[i] = (((cur_ind[i]+1)*dds[i])
-                        +left_edge[i]-v_pos[i])*iv_dir[i]
-        if step[i] < 0:
-            tmax[i] = (((cur_ind[i]+0)*dds[i])
-                        +left_edge[i]-v_pos[i])*iv_dir[i]
-        tdelta[i] = (dds[i]*iv_dir[i])
-        if tdelta[i] < 0: tdelta[i] *= -1
-    # We have to jumpstart our calculation
-    enter_t = intersect_t
-    hit = 0
-    while 1:
-        # dims here is one less than the dimensions of the data,
-        # but we are tracing on the grid, not on the data...
-        if (not (0 <= cur_ind[0] < dims[0])) or \
-           (not (0 <= cur_ind[1] < dims[1])) or \
-           (not (0 <= cur_ind[2] < dims[2])):
-            break
-        hit += 1
-        if tmax[0] < tmax[1]:
-            if tmax[0] < tmax[2]:
-                exit_t = fmin(tmax[0], 1.0)
-                sample_values(v_pos, v_dir, enter_t, exit_t, cur_ind, rdata)
-                cur_ind[0] += step[0]
-                enter_t = tmax[0]
-                tmax[0] += tdelta[0]
-            else:
-                exit_t = fmin(tmax[2], 1.0)
-                sample_values(v_pos, v_dir, enter_t, exit_t, cur_ind, rdata)
-                cur_ind[2] += step[2]
-                enter_t = tmax[2]
-                tmax[2] += tdelta[2]
-        else:
-            if tmax[1] < tmax[2]:
-                exit_t = fmin(tmax[1], 1.0)
-                sample_values(v_pos, v_dir, enter_t, exit_t, cur_ind, rdata)
-                cur_ind[1] += step[1]
-                enter_t = tmax[1]
-                tmax[1] += tdelta[1]
-            else:
-                exit_t = fmin(tmax[2], 1.0)
-                sample_values(v_pos, v_dir, enter_t, exit_t, cur_ind, rdata)
-                cur_ind[2] += step[2]
-                enter_t = tmax[2]
-                tmax[2] += tdelta[2]
-        if enter_t >= 1.0: break
-    if return_t != NULL: return_t[0] = exit_t
-    return hit
-


diff -r 5303e3e582c693ae04ca3075ca53bbe87e224262 -r 967d516c0683a82a202e3da97e3014b0786e8fed yt/utilities/_amr_utils/setup.py
--- a/yt/utilities/_amr_utils/setup.py
+++ b/yt/utilities/_amr_utils/setup.py
@@ -173,5 +173,15 @@
                           "yt/utilities/_amr_utils/healpix_pix2vec_nest.c",
                           "yt/utilities/_amr_utils/healpix_vec2pix_nest.c"]
           )
+    config.add_extension("grid_traversal", 
+               ["yt/utilities/_amr_utils/grid_traversal.pyx",
+                "yt/utilities/_amr_utils/FixedInterpolator.c"],
+               include_dirs=["yt/utilities/_amr_utils/"],
+               libraries=["m"], 
+               depends = ["yt/utilities/_amr_utils/VolumeIntegrator.pyx",
+                          "yt/utilities/_amr_utils/fp_utils.pxd",
+                          "yt/utilities/_amr_utils/FixedInterpolator.h",
+                          ]
+          )
     config.make_config_py() # installs __config__.py
     return config



https://bitbucket.org/yt_analysis/yt/changeset/775ebaefdbb1/
changeset:   775ebaefdbb1
branch:      yt
user:        MatthewTurk
date:        2011-12-07 19:26:42
summary:     Minor fixes to get everything to compile nicely.  Added a few more nogils.
affected #:  1 file

diff -r 967d516c0683a82a202e3da97e3014b0786e8fed -r 775ebaefdbb10e4ed0952eb94a10399fe8ae5abf yt/utilities/_amr_utils/grid_traversal.pyx
--- a/yt/utilities/_amr_utils/grid_traversal.pyx
+++ b/yt/utilities/_amr_utils/grid_traversal.pyx
@@ -50,7 +50,7 @@
                 np.float64_t enter_t,
                 np.float64_t exit_t,
                 int index[3],
-                void *data)
+                void *data) nogil
 
 cdef extern from "FixedInterpolator.h":
     np.float64_t fast_interpolate(int ds[3], int ci[3], np.float64_t dp[3],
@@ -100,8 +100,8 @@
             c.left_edge[i] = left_edge[i]
             c.right_edge[i] = right_edge[i]
             c.dims[i] = dims[i]
-            c.dds[i] = (self.right_edge[i] - self.left_edge[i])/dims[i]
-            c.idds[i] = 1.0/self.dds[i]
+            c.dds[i] = (c.right_edge[i] - c.left_edge[i])/dims[i]
+            c.idds[i] = 1.0/c.dds[i]
         self.my_data = data
         c.data = <np.float64_t **> malloc(sizeof(np.float64_t*) * n_fields)
         for d in data:
@@ -278,7 +278,7 @@
                  np.float64_t enter_t,
                  np.float64_t exit_t,
                  int index[3],
-                 void *data):
+                 void *data) nogil:
     cdef ImageAccumulator *im = <ImageAccumulator *> data
     cdef int i
     cdef int di = (index[0]*(vc.dims[1])+index[1])*vc.dims[2]+index[2]



https://bitbucket.org/yt_analysis/yt/changeset/25a6a5fa25c1/
changeset:   25a6a5fa25c1
branch:      yt
user:        MatthewTurk
date:        2011-12-07 20:09:48
summary:     Removing nogil for now, since we're still debugging, and now projections work.
affected #:  1 file

diff -r 775ebaefdbb10e4ed0952eb94a10399fe8ae5abf -r 25a6a5fa25c1fccab915332929bd5fb7c2711591 yt/utilities/_amr_utils/grid_traversal.pyx
--- a/yt/utilities/_amr_utils/grid_traversal.pyx
+++ b/yt/utilities/_amr_utils/grid_traversal.pyx
@@ -50,7 +50,7 @@
                 np.float64_t enter_t,
                 np.float64_t exit_t,
                 int index[3],
-                void *data) nogil
+                void *data)
 
 cdef extern from "FixedInterpolator.h":
     np.float64_t fast_interpolate(int ds[3], int ci[3], np.float64_t dp[3],
@@ -96,6 +96,7 @@
             malloc(sizeof(VolumeContainer))
         cdef VolumeContainer *c = self.container # convenience
         cdef int n_fields = len(data)
+        c.n_fields = n_fields
         for i in range(3):
             c.left_edge[i] = left_edge[i]
             c.right_edge[i] = right_edge[i]
@@ -104,12 +105,11 @@
             c.idds[i] = 1.0/c.dds[i]
         self.my_data = data
         c.data = <np.float64_t **> malloc(sizeof(np.float64_t*) * n_fields)
-        for d in data:
-            tdata = d
+        for i in range(n_fields):
+            tdata = data[i]
             c.data[i] = <np.float64_t *> tdata.data
 
     def __dealloc__(self):
-        cdef int n_fields = len(self.my_data)
         # The data fields are not owned by the container, they are owned by us!
         # So we don't need to deallocate them.
         free(self.container.data)
@@ -159,9 +159,9 @@
         imagec.y_vec = <np.float64_t *> y_vec.data
         imagec.nv[0] = vp_pos.shape[0]
         imagec.nv[1] = vp_pos.shape[1]
-        for i in range(4): image.bounds[i] = bounds[i]
-        imagec.pdx = (self.bounds[1] - self.bounds[0])/self.nv[0]
-        imagec.pdy = (self.bounds[3] - self.bounds[2])/self.nv[1]
+        for i in range(4): imagec.bounds[i] = bounds[i]
+        imagec.pdx = (bounds[1] - bounds[0])/imagec.nv[0]
+        imagec.pdy = (bounds[3] - bounds[2])/imagec.nv[1]
         for i in range(3):
             imagec.vp_strides[i] = vp_pos.strides[i] / 8
             imagec.im_strides[i] = image.strides[i] / 8
@@ -169,7 +169,7 @@
             for i in range(3):
                 imagec.vd_strides[i] = vp_dir.strides[i] / 8
         else:
-            imagec.vd_strides[0] = self.vd_strides[1] = self.vd_strides[2] = -1
+            imagec.vd_strides[0] = imagec.vd_strides[1] = imagec.vd_strides[2] = -1
         self.setup()
 
     @cython.boundscheck(False)
@@ -278,12 +278,16 @@
                  np.float64_t enter_t,
                  np.float64_t exit_t,
                  int index[3],
-                 void *data) nogil:
+                 void *data):
     cdef ImageAccumulator *im = <ImageAccumulator *> data
     cdef int i
+    cdef np.float64_t dl = (exit_t - enter_t)
+    # We need this because by default it assumes vertex-centered data.
+    for i in range(3):
+        if index[i] < 0 or index[i] >= vc.dims[i]: return
     cdef int di = (index[0]*(vc.dims[1])+index[1])*vc.dims[2]+index[2]
-    for i in range(imin(4, vc.n_fields)):
-        im.rgba[i] += vc.data[i][di]
+    for i in range(imin(3, vc.n_fields)):
+        im.rgba[i] += vc.data[i][di] * dl
 
 cdef class ProjectionSampler(ImageSampler):
     def setup(self):
@@ -413,7 +417,7 @@
                      sample_function *sampler,
                      void *data,
                      np.float64_t *return_t = NULL,
-                     np.float64_t enter_t = -1.0) nogil:
+                     np.float64_t enter_t = -1.0):
     cdef int cur_ind[3], step[3], x, y, i, n, flat_ind, hit, direction
     cdef np.float64_t intersect_t = 1.0
     cdef np.float64_t iv_dir[3]



https://bitbucket.org/yt_analysis/yt/changeset/e3ef7ee84e44/
changeset:   e3ef7ee84e44
branch:      yt
user:        MatthewTurk
date:        2011-12-07 20:24:51
summary:     Putting in the threading stuff, although right now I see a big slowdown from
using it.
affected #:  2 files

diff -r 25a6a5fa25c1fccab915332929bd5fb7c2711591 -r e3ef7ee84e440471c029831cd716739cea6de78e yt/utilities/_amr_utils/grid_traversal.pyx
--- a/yt/utilities/_amr_utils/grid_traversal.pyx
+++ b/yt/utilities/_amr_utils/grid_traversal.pyx
@@ -31,6 +31,8 @@
 from stdlib cimport malloc, free, abs
 from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip
 
+from cython.parallel import prange, parallel, threadid
+
 cdef extern from "math.h":
     double exp(double x) nogil
     float expf(float x) nogil
@@ -50,7 +52,7 @@
                 np.float64_t enter_t,
                 np.float64_t exit_t,
                 int index[3],
-                void *data)
+                void *data) nogil
 
 cdef extern from "FixedInterpolator.h":
     np.float64_t fast_interpolate(int ds[3], int ci[3], np.float64_t dp[3],
@@ -174,6 +176,7 @@
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
+    @cython.cdivision(True)
     cdef void get_start_stop(self, np.float64_t *ex, int *rv):
         # Extrema need to be re-centered
         cdef np.float64_t cx, cy
@@ -251,7 +254,7 @@
         cdef ImageAccumulator idata
         cdef void *data = <void *> &idata
         if im.vd_strides[0] == -1:
-            for vi in range(iter[0], iter[1]):
+            for vi in prange(iter[0], iter[1], nogil=True):
                 for vj in range(iter[2], iter[3]):
                     for i in range(4): idata.rgba[i] = 0.0
                     self.copy_into(im.vp_pos, v_pos, vi, vj, 3, im.vp_strides)
@@ -278,7 +281,7 @@
                  np.float64_t enter_t,
                  np.float64_t exit_t,
                  int index[3],
-                 void *data):
+                 void *data) nogil:
     cdef ImageAccumulator *im = <ImageAccumulator *> data
     cdef int i
     cdef np.float64_t dl = (exit_t - enter_t)
@@ -417,7 +420,7 @@
                      sample_function *sampler,
                      void *data,
                      np.float64_t *return_t = NULL,
-                     np.float64_t enter_t = -1.0):
+                     np.float64_t enter_t = -1.0) nogil:
     cdef int cur_ind[3], step[3], x, y, i, n, flat_ind, hit, direction
     cdef np.float64_t intersect_t = 1.0
     cdef np.float64_t iv_dir[3]


diff -r 25a6a5fa25c1fccab915332929bd5fb7c2711591 -r e3ef7ee84e440471c029831cd716739cea6de78e yt/utilities/_amr_utils/setup.py
--- a/yt/utilities/_amr_utils/setup.py
+++ b/yt/utilities/_amr_utils/setup.py
@@ -178,6 +178,8 @@
                 "yt/utilities/_amr_utils/FixedInterpolator.c"],
                include_dirs=["yt/utilities/_amr_utils/"],
                libraries=["m"], 
+               extra_compile_args=['-fopenmp'],
+               extra_link_args=['-fopenmp'],
                depends = ["yt/utilities/_amr_utils/VolumeIntegrator.pyx",
                           "yt/utilities/_amr_utils/fp_utils.pxd",
                           "yt/utilities/_amr_utils/FixedInterpolator.h",



https://bitbucket.org/yt_analysis/yt/changeset/6f5b0e37c7f0/
changeset:   6f5b0e37c7f0
branch:      yt
user:        MatthewTurk
date:        2011-12-08 03:36:28
summary:     Move the assignments into a parallel() section in the projection sampler.
affected #:  1 file

diff -r e3ef7ee84e440471c029831cd716739cea6de78e -r 6f5b0e37c7f0cf1d175348a642046fb66b8e0e81 yt/utilities/_amr_utils/grid_traversal.pyx
--- a/yt/utilities/_amr_utils/grid_traversal.pyx
+++ b/yt/utilities/_amr_utils/grid_traversal.pyx
@@ -251,19 +251,25 @@
         iter[1] = iclip(iter[1]+1, 0, im.nv[0])
         iter[2] = iclip(iter[2]-1, 0, im.nv[1])
         iter[3] = iclip(iter[3]+1, 0, im.nv[1])
-        cdef ImageAccumulator idata
-        cdef void *data = <void *> &idata
+        cdef ImageAccumulator *idata
+        cdef void *data
         if im.vd_strides[0] == -1:
-            for vi in prange(iter[0], iter[1], nogil=True):
-                for vj in range(iter[2], iter[3]):
-                    for i in range(4): idata.rgba[i] = 0.0
-                    self.copy_into(im.vp_pos, v_pos, vi, vj, 3, im.vp_strides)
-                    self.copy_into(im.image, idata.rgba, vi, vj, 3, im.im_strides)
-                    walk_volume(vc, v_pos, im.vp_dir, self.sampler, data)
-                    self.copy_back(idata.rgba, im.image, vi, vj, 3, im.im_strides)
+            with nogil, parallel():
+                idata = <ImageAccumulator *> malloc(sizeof(ImageAccumulator))
+                data = <void *> idata
+                for vi in prange(iter[0], iter[1]):
+                    for vj in range(iter[2], iter[3]):
+                        for i in range(4): idata.rgba[i] = 0.0
+                        self.copy_into(im.vp_pos, v_pos, vi, vj, 3, im.vp_strides)
+                        self.copy_into(im.image, idata.rgba, vi, vj, 3, im.im_strides)
+                        walk_volume(vc, v_pos, im.vp_dir, self.sampler, data)
+                        self.copy_back(idata.rgba, im.image, vi, vj, 3, im.im_strides)
+                free(idata)
         else:
             # If we do not have an orthographic projection, we have to cast all
             # our rays (until we can get an extrema calculation...)
+            idata = <ImageAccumulator *> malloc(sizeof(ImageAccumulator))
+            data = <void *> idata
             for vi in range(im.nv[0]):
                 for vj in range(im.nv[1]):
                     for i in range(4): idata.rgba[i] = 0.0
@@ -295,7 +301,7 @@
 cdef class ProjectionSampler(ImageSampler):
     def setup(self):
         self.sampler = projection_sampler
-    
+
 cdef class GridFace:
     cdef int direction
     cdef public np.float64_t coord



https://bitbucket.org/yt_analysis/yt/changeset/59eb79015ebf/
changeset:   59eb79015ebf
branch:      yt
user:        MatthewTurk
date:        2011-12-08 04:16:16
summary:     Add early returns for calling the image sampler
affected #:  1 file

diff -r 6f5b0e37c7f0cf1d175348a642046fb66b8e0e81 -r 59eb79015ebf9351a45dfae1f2b5e3c9b99fa794 yt/utilities/_amr_utils/grid_traversal.pyx
--- a/yt/utilities/_amr_utils/grid_traversal.pyx
+++ b/yt/utilities/_amr_utils/grid_traversal.pyx
@@ -234,6 +234,7 @@
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
+    @cython.cdivision(True)
     def __call__(self, PartitionedGrid pg):
         # This routine will iterate over all of the vectors and cast each in
         # turn.  Might benefit from a more sophisticated intersection check,
@@ -248,9 +249,13 @@
         self.calculate_extent(extrema, vc)
         self.get_start_stop(extrema, iter)
         iter[0] = iclip(iter[0]-1, 0, im.nv[0])
+        if iter[0] == 0: return
         iter[1] = iclip(iter[1]+1, 0, im.nv[0])
+        if iter[1] == 0 or iter[1] == iter[0]: return
         iter[2] = iclip(iter[2]-1, 0, im.nv[1])
+        if iter[2] == 0: return
         iter[3] = iclip(iter[3]+1, 0, im.nv[1])
+        if iter[3] == 0 or iter[3] == iter[2]: return
         cdef ImageAccumulator *idata
         cdef void *data
         if im.vd_strides[0] == -1:



https://bitbucket.org/yt_analysis/yt/changeset/63730d8c9ebe/
changeset:   63730d8c9ebe
branch:      yt
user:        MatthewTurk
date:        2011-12-08 12:50:05
summary:     Adding a wrapper to turn on/off google perftools
affected #:  2 files

diff -r 59eb79015ebf9351a45dfae1f2b5e3c9b99fa794 -r 63730d8c9ebe2f7b29f21b412db05a2a59a41d44 yt/utilities/_amr_utils/perftools_wrap.pyx
--- /dev/null
+++ b/yt/utilities/_amr_utils/perftools_wrap.pyx
@@ -0,0 +1,39 @@
+"""
+Turn on and off perftools profiling
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+# For more info:
+# https://pygabriel.wordpress.com/2010/04/14/profiling-python-c-extensions/
+
+# prof.pyx
+cdef extern from "google/profiler.h":
+    void ProfilerStart( char* fname )
+    void ProfilerStop()
+
+def profiler_start(fname):
+    ProfilerStart(<char *>fname)
+
+def profiler_stop():
+    ProfilerStop()
+


diff -r 59eb79015ebf9351a45dfae1f2b5e3c9b99fa794 -r 63730d8c9ebe2f7b29f21b412db05a2a59a41d44 yt/utilities/_amr_utils/setup.py
--- a/yt/utilities/_amr_utils/setup.py
+++ b/yt/utilities/_amr_utils/setup.py
@@ -185,5 +185,16 @@
                           "yt/utilities/_amr_utils/FixedInterpolator.h",
                           ]
           )
+    if os.environ.get("GPERFTOOLS", "no").upper() != "NO":
+        gpd = os.environ["GPERFTOOLS"]
+        idir = os.path.join(gpd, "include")
+        ldir = os.path.join(gpd, "lib")
+        print "INCLUDE AND LIB DIRS", idir, ldir
+        config.add_extension("perftools_wrap",
+                ["yt/utilities/_amr_utils/perftools_wrap.pyx"],
+                libraries=["profiler"],
+                library_dirs = [ldir],
+                include_dirs = [idir],
+            )
     config.make_config_py() # installs __config__.py
     return config
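
The perftools_wrap extension above exposes just gperftools' ProfilerStart and
ProfilerStop as profiler_start() and profiler_stop(), and setup.py only builds it
when the GPERFTOOLS environment variable points at a gperftools install. Assuming
it builds and imports from yt.utilities._amr_utils, usage would look roughly like
the sketch below; the profile file name and the profiled function are stand-ins,
not anything from the diff.

    # Rough usage sketch; hot_section() is a stand-in for the code being profiled.
    from yt.utilities._amr_utils.perftools_wrap import profiler_start, profiler_stop

    def hot_section():
        # stand-in for the expensive call (e.g. casting a full image of rays)
        sum(i * i for i in range(10 ** 6))

    profiler_start("vr.prof")   # start the gperftools CPU profiler, writing vr.prof
    hot_section()
    profiler_stop()             # stop and flush the profile

    # The dump can then be read with gperftools' pprof, e.g.:
    #   pprof --text `which python` vr.prof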



https://bitbucket.org/yt_analysis/yt/changeset/7caa49bd6e93/
changeset:   7caa49bd6e93
branch:      yt
user:        MatthewTurk
date:        2011-12-08 16:34:51
summary:     Now it gets the right answer in thread-mode but is no faster.
affected #:  1 file

diff -r 63730d8c9ebe2f7b29f21b412db05a2a59a41d44 -r 7caa49bd6e935229db6619a8337f7276d9585b8a yt/utilities/_amr_utils/grid_traversal.pyx
--- a/yt/utilities/_amr_utils/grid_traversal.pyx
+++ b/yt/utilities/_amr_utils/grid_traversal.pyx
@@ -127,7 +127,7 @@
     np.float64_t *x_vec, *y_vec
 
 cdef struct ImageAccumulator:
-    np.float64_t rgba[4]
+    np.float64_t rgba[3]
 
 cdef class ImageSampler:
     cdef ImageContainer *image
@@ -239,12 +239,12 @@
         # This routine will iterate over all of the vectors and cast each in
         # turn.  Might benefit from a more sophisticated intersection check,
         # like http://courses.csusm.edu/cs697exz/ray_box.htm
-        cdef int vi, vj, hit, i, ni, nj, nn
+        cdef int vi, vj, hit, i, j, ni, nj, nn, offset
         cdef int iter[4]
         cdef VolumeContainer *vc = pg.container
         cdef ImageContainer *im = self.image
         if self.sampler == NULL: raise RuntimeError
-        cdef np.float64_t v_pos[3], v_dir[3], rgba[6], extrema[4]
+        cdef np.float64_t *v_pos, v_dir[3], rgba[6], extrema[4]
         hit = 0
         self.calculate_extent(extrema, vc)
         self.get_start_stop(extrema, iter)
@@ -258,17 +258,24 @@
         if iter[3] == 0 or iter[3] == iter[2]: return
         cdef ImageAccumulator *idata
         cdef void *data
+        cdef int nx = (iter[1] - iter[0])
+        cdef int ny = (iter[3] - iter[2])
+        cdef int size = nx * ny
         if im.vd_strides[0] == -1:
             with nogil, parallel():
                 idata = <ImageAccumulator *> malloc(sizeof(ImageAccumulator))
-                data = <void *> idata
-                for vi in prange(iter[0], iter[1]):
-                    for vj in range(iter[2], iter[3]):
-                        for i in range(4): idata.rgba[i] = 0.0
-                        self.copy_into(im.vp_pos, v_pos, vi, vj, 3, im.vp_strides)
-                        self.copy_into(im.image, idata.rgba, vi, vj, 3, im.im_strides)
-                        walk_volume(vc, v_pos, im.vp_dir, self.sampler, data)
-                        self.copy_back(idata.rgba, im.image, vi, vj, 3, im.im_strides)
+                v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+                for j in prange(size, schedule="dynamic"):
+                    vj = j % ny
+                    vi = (j - vj) / ny + iter[0]
+                    vj = vj + iter[2]
+                    offset = im.vp_strides[0] * vi + im.vp_strides[1] * vj
+                    for i in range(3): v_pos[i] = im.vp_pos[i + offset]
+                    offset = im.im_strides[0] * vi + im.im_strides[1] * vj
+                    for i in range(3): idata.rgba[i] = im.image[i + offset]
+                    walk_volume(vc, v_pos, im.vp_dir, self.sampler,
+                                (<void *> idata))
+                    for i in range(3): im.image[i + offset] = idata.rgba[i]
                 free(idata)
         else:
             # If we do not have an orthographic projection, we have to cast all
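
The index arithmetic in the prange loop above flattens the (vi, vj) pixel pair into
a single counter j, so OpenMP can hand contiguous chunks of pixels to threads while
each thread keeps its own malloc'ed ImageAccumulator and v_pos buffer. A small
pure-Python illustration of just that mapping (names invented for illustration):

    # Sketch of the flattened-index decomposition used in the prange loop.
    def iterate_tile(iter_bounds):
        i0, i1, j0, j1 = iter_bounds
        ny = j1 - j0
        size = (i1 - i0) * ny
        for j in range(size):          # prange(size) in the Cython version
            vj = j % ny                # column within the tile
            vi = (j - vj) // ny + i0   # row within the tile
            yield vi, vj + j0

    # Example: a 2x3 tile starting at pixel (4, 10)
    list(iterate_tile((4, 6, 10, 13)))
    # -> [(4, 10), (4, 11), (4, 12), (5, 10), (5, 11), (5, 12)]

Every j maps to a distinct (vi, vj), so each iteration touches a different image
offset and the threads never write the same pixel.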



https://bitbucket.org/yt_analysis/yt/changeset/ba3f092ed213/
changeset:   ba3f092ed213
branch:      yt
user:        MatthewTurk
date:        2011-12-08 17:37:48
summary:     Changing scheduling to 'auto'
affected #:  1 file

diff -r 7caa49bd6e935229db6619a8337f7276d9585b8a -r ba3f092ed213e09ca12c68234075d24d7d1e7262 yt/utilities/_amr_utils/grid_traversal.pyx
--- a/yt/utilities/_amr_utils/grid_traversal.pyx
+++ b/yt/utilities/_amr_utils/grid_traversal.pyx
@@ -265,7 +265,7 @@
             with nogil, parallel():
                 idata = <ImageAccumulator *> malloc(sizeof(ImageAccumulator))
                 v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-                for j in prange(size, schedule="dynamic"):
+                for j in prange(size, schedule="auto"):
                     vj = j % ny
                     vi = (j - vj) / ny + iter[0]
                     vj = vj + iter[2]



https://bitbucket.org/yt_analysis/yt/changeset/7a5ad9370a6c/
changeset:   7a5ad9370a6c
branch:      yt
user:        MatthewTurk
date:        2011-12-08 19:22:10
summary:     Too many early return statements.  A few could come back, but some are
incorrect.
affected #:  1 file

diff -r ba3f092ed213e09ca12c68234075d24d7d1e7262 -r 7a5ad9370a6c293a436922b0899493efe89b524a yt/utilities/_amr_utils/grid_traversal.pyx
--- a/yt/utilities/_amr_utils/grid_traversal.pyx
+++ b/yt/utilities/_amr_utils/grid_traversal.pyx
@@ -249,13 +249,9 @@
         self.calculate_extent(extrema, vc)
         self.get_start_stop(extrema, iter)
         iter[0] = iclip(iter[0]-1, 0, im.nv[0])
-        if iter[0] == 0: return
         iter[1] = iclip(iter[1]+1, 0, im.nv[0])
-        if iter[1] == 0 or iter[1] == iter[0]: return
         iter[2] = iclip(iter[2]-1, 0, im.nv[1])
-        if iter[2] == 0: return
         iter[3] = iclip(iter[3]+1, 0, im.nv[1])
-        if iter[3] == 0 or iter[3] == iter[2]: return
         cdef ImageAccumulator *idata
         cdef void *data
         cdef int nx = (iter[1] - iter[0])
@@ -265,7 +261,7 @@
             with nogil, parallel():
                 idata = <ImageAccumulator *> malloc(sizeof(ImageAccumulator))
                 v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-                for j in prange(size, schedule="auto"):
+                for j in prange(size, schedule="dynamic"):
                     vj = j % ny
                     vi = (j - vj) / ny + iter[0]
                     vj = vj + iter[2]



https://bitbucket.org/yt_analysis/yt/changeset/f8667ca04152/
changeset:   f8667ca04152
branch:      yt
user:        MatthewTurk
date:        2011-12-12 20:08:19
summary:     Continuing the refactoring of the volume rendering by adding a volume rendering
sampler.  It compiles.
affected #:  2 files

diff -r 7a5ad9370a6c293a436922b0899493efe89b524a -r f8667ca04152bbd97cec0521a6941c1390247e95 yt/utilities/_amr_utils/field_interpolation_tables.pxd
--- /dev/null
+++ b/yt/utilities/_amr_utils/field_interpolation_tables.pxd
@@ -0,0 +1,93 @@
+"""
+Field Interpolation Tables
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+cimport cython
+cimport numpy as np
+from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip
+
+cdef struct FieldInterpolationTable:
+    # Note that we make an assumption about retaining a reference to values
+    # externally.
+    np.float64_t *values 
+    np.float64_t bounds[2]
+    np.float64_t dbin
+    np.float64_t idbin
+    int field_id
+    int weight_field_id
+    int weight_table_id
+    int nbins
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef inline void FIT_initialize_table(FieldInterpolationTable *fit, int nbins,
+              np.float64_t *values, np.float64_t bounds1, np.float64_t bounds2,
+              int field_id, int weight_field_id, int weight_table_id) nogil:
+    fit.bounds[0] = bounds1; fit.bounds[1] = bounds2
+    fit.nbins = nbins
+    fit.dbin = (fit.bounds[1] - fit.bounds[0])/fit.nbins
+    fit.idbin = 1.0/fit.dbin
+    # Better not pull this out from under us, yo
+    fit.values = values
+    fit.field_id = field_id
+    fit.weight_field_id = weight_field_id
+    fit.weight_table_id = weight_table_id
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef inline np.float64_t FIT_get_value(FieldInterpolationTable *fit,
+                                       np.float64_t dvs[6]) nogil:
+    cdef np.float64_t bv, dy, dd, tf, rv
+    cdef int bin_id
+    if dvs[fit.field_id] > fit.bounds[1] or dvs[fit.field_id] < fit.bounds[0]: return 0.0
+    bin_id = <int> ((dvs[fit.field_id] - fit.bounds[0]) * fit.idbin)
+    dd = dvs[fit.field_id] - (fit.bounds[0] + bin_id * fit.dbin) # x - x0
+    bv = fit.values[bin_id]
+    dy = fit.values[bin_id + 1] - bv
+    if fit.weight_field_id != -1:
+        return dvs[fit.weight_field_id] * (bv + dd*dy*fit.idbin)
+    return (bv + dd*dy*fit.idbin)
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef inline void FIT_eval_transfer(np.float64_t dt, np.float64_t *dvs,
+                            np.float64_t *rgba, int n_fits,
+                            FieldInterpolationTable *fits[6],
+                            int field_table_ids[6]) nogil:
+    cdef int i, fid, use
+    cdef np.float64_t ta, tf, istorage[6], trgba[6], dot_prod
+    for i in range(6): istorage[i] = 0.0
+    for i in range(n_fits):
+        istorage[i] = FIT_get_value(fits[i], dvs)
+    for i in range(n_fits):
+        fid = fits[i].weight_table_id
+        if fid != -1: istorage[i] *= istorage[fid]
+    for i in range(6):
+        trgba[i] = istorage[field_table_ids[i]]
+    for i in range(3):
+        ta = fmax((1.0 - dt*trgba[i+3]), 0.0)
+        rgba[i] = dt*trgba[i] + ta * rgba[i]
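
For readers new to the transfer-function machinery: FIT_get_value does a
piecewise-linear lookup of a 1D table at the sampled field value, and
FIT_eval_transfer then folds the per-channel results into the running color with
the emission/absorption update rgba[i] = dt*trgba[i] + max(1 - dt*trgba[i+3], 0)*rgba[i].
A condensed Python sketch of the same arithmetic, with the table layout simplified
to a single field and the names invented for illustration:

    def table_lookup(values, lo, hi, x):
        # Piecewise-linear lookup of a 1D table spanning [lo, hi]; zero outside.
        if x < lo or x > hi:
            return 0.0
        nbins = len(values) - 1
        dbin = (hi - lo) / nbins
        b = min(int((x - lo) / dbin), nbins - 1)
        frac = (x - lo - b * dbin) / dbin
        return values[b] + frac * (values[b + 1] - values[b])

    def composite(rgba, emit, alpha, dt):
        # Per-channel emission/absorption update, as in FIT_eval_transfer.
        for i in range(3):
            ta = max(1.0 - dt * alpha[i], 0.0)   # transmission left after this step
            rgba[i] = dt * emit[i] + ta * rgba[i]
        return rgba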


diff -r 7a5ad9370a6c293a436922b0899493efe89b524a -r f8667ca04152bbd97cec0521a6941c1390247e95 yt/utilities/_amr_utils/grid_traversal.pyx
--- a/yt/utilities/_amr_utils/grid_traversal.pyx
+++ b/yt/utilities/_amr_utils/grid_traversal.pyx
@@ -30,6 +30,8 @@
 cimport healpix_interface
 from stdlib cimport malloc, free, abs
 from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip
+from field_interpolation_tables cimport \
+    FieldInterpolationTable, FIT_initialize_table, FIT_eval_transfer
 
 from cython.parallel import prange, parallel, threadid
 
@@ -56,17 +58,18 @@
 
 cdef extern from "FixedInterpolator.h":
     np.float64_t fast_interpolate(int ds[3], int ci[3], np.float64_t dp[3],
-                                  np.float64_t *data)
-    np.float64_t offset_interpolate(int ds[3], np.float64_t dp[3], np.float64_t *data)
+                                  np.float64_t *data) nogil
+    np.float64_t offset_interpolate(int ds[3], np.float64_t dp[3],
+                                    np.float64_t *data) nogil
     np.float64_t trilinear_interpolate(int ds[3], int ci[3], np.float64_t dp[3],
-                                       np.float64_t *data)
+                                       np.float64_t *data) nogil
     void eval_gradient(int ds[3], np.float64_t dp[3], np.float64_t *data,
-                       np.float64_t grad[3])
-    void offset_fill(int *ds, np.float64_t *data, np.float64_t *gridval)
+                       np.float64_t grad[3]) nogil
+    void offset_fill(int *ds, np.float64_t *data, np.float64_t *gridval) nogil
     void vertex_interp(np.float64_t v1, np.float64_t v2, np.float64_t isovalue,
                        np.float64_t vl[3], np.float64_t dds[3],
                        np.float64_t x, np.float64_t y, np.float64_t z,
-                       int vind1, int vind2)
+                       int vind1, int vind2) nogil
 
 cdef struct VolumeContainer:
     int n_fields
@@ -128,11 +131,13 @@
 
 cdef struct ImageAccumulator:
     np.float64_t rgba[3]
+    void *supp_data
 
 cdef class ImageSampler:
     cdef ImageContainer *image
     cdef sample_function *sampler
     cdef public object avp_pos, avp_dir, acenter, aimage, ax_vec, ay_vec
+    cdef void *supp_data
     def __cinit__(self, 
                   np.ndarray[np.float64_t, ndim=3] vp_pos,
                   np.ndarray vp_dir,
@@ -172,7 +177,6 @@
                 imagec.vd_strides[i] = vp_dir.strides[i] / 8
         else:
             imagec.vd_strides[0] = imagec.vd_strides[1] = imagec.vd_strides[2] = -1
-        self.setup()
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -257,9 +261,11 @@
         cdef int nx = (iter[1] - iter[0])
         cdef int ny = (iter[3] - iter[2])
         cdef int size = nx * ny
+        self.setup(pg)
         if im.vd_strides[0] == -1:
             with nogil, parallel():
                 idata = <ImageAccumulator *> malloc(sizeof(ImageAccumulator))
+                idata.supp_data = self.supp_data
                 v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
                 for j in prange(size, schedule="dynamic"):
                     vj = j % ny
@@ -307,9 +313,59 @@
         im.rgba[i] += vc.data[i][di] * dl
 
 cdef class ProjectionSampler(ImageSampler):
-    def setup(self):
+    def setup(self, PartitionedGrid pg):
         self.sampler = projection_sampler
 
+
+cdef struct VolumeRenderAccumulator:
+    int n_fits
+    int n_samples
+    np.float64_t dvs[6]
+    FieldInterpolationTable *fits[6]
+    int field_table_ids[6]
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef void volume_render_sampler(
+                 VolumeContainer *vc, 
+                 np.float64_t v_pos[3],
+                 np.float64_t v_dir[3],
+                 np.float64_t enter_t,
+                 np.float64_t exit_t,
+                 int index[3],
+                 void *data) nogil:
+    cdef ImageAccumulator *im = <ImageAccumulator *> data
+    cdef VolumeRenderAccumulator *vri = <VolumeRenderAccumulator *> \
+            im.supp_data
+    # we assume this has vertex-centered data.
+    cdef int offset = index[0] * (vc.dims[1] + 1) * (vc.dims[2] + 1) \
+                    + index[1] * (vc.dims[2] + 1) + index[2]
+    cdef np.float64_t slopes[6], dp[3], ds[3]
+    cdef np.float64_t dt = (exit_t - enter_t) / vri.n_samples
+    for i in range(3):
+        dp[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
+        dp[i] -= index[i] * vc.dds[i] + vc.left_edge[i]
+        dp[i] *= vc.idds[i]
+        ds[i] = v_dir[i] * vc.idds[i] * dt
+    for i in range(vc.n_fields):
+        slopes[i] = offset_interpolate(vc.dims, dp,
+                        vc.data[i] + offset)
+    for i in range(3):
+        dp[i] += ds[i] * vri.n_samples
+    cdef np.float64_t temp
+    for i in range(vc.n_fields):
+        temp = slopes[i]
+        slopes[i] -= offset_interpolate(vc.dims, dp,
+                         vc.data[i] + offset)
+        slopes[i] *= -1.0/vri.n_samples
+        vri.dvs[i] = temp
+    for dti in range(vri.n_samples): 
+        FIT_eval_transfer(dt, vri.dvs, im.rgba, vri.n_fits, vri.fits,
+                          vri.field_table_ids)
+        for i in range(vc.n_fields):
+            vri.dvs[i] += slopes[i]
+
 cdef class GridFace:
     cdef int direction
     cdef public np.float64_t coord



https://bitbucket.org/yt_analysis/yt/changeset/fc17095f5524/
changeset:   fc17095f5524
branch:      yt
user:        MatthewTurk
date:        2011-12-12 21:12:52
summary:     Initial move to the grid_traversal code for transfer-function integration.
Works with threading and gets good scaling results on my machine.
affected #:  4 files

diff -r f8667ca04152bbd97cec0521a6941c1390247e95 -r fc17095f5524360bfc75a33bc1f3eb8e5c776f9c yt/utilities/_amr_utils/field_interpolation_tables.pxd
--- a/yt/utilities/_amr_utils/field_interpolation_tables.pxd
+++ b/yt/utilities/_amr_utils/field_interpolation_tables.pxd
@@ -76,13 +76,13 @@
 @cython.cdivision(True)
 cdef inline void FIT_eval_transfer(np.float64_t dt, np.float64_t *dvs,
                             np.float64_t *rgba, int n_fits,
-                            FieldInterpolationTable *fits[6],
+                            FieldInterpolationTable fits[6],
                             int field_table_ids[6]) nogil:
     cdef int i, fid, use
     cdef np.float64_t ta, tf, istorage[6], trgba[6], dot_prod
     for i in range(6): istorage[i] = 0.0
     for i in range(n_fits):
-        istorage[i] = FIT_get_value(fits[i], dvs)
+        istorage[i] = FIT_get_value(&fits[i], dvs)
     for i in range(n_fits):
         fid = fits[i].weight_table_id
         if fid != -1: istorage[i] *= istorage[fid]


diff -r f8667ca04152bbd97cec0521a6941c1390247e95 -r fc17095f5524360bfc75a33bc1f3eb8e5c776f9c yt/utilities/_amr_utils/grid_traversal.pyx
--- a/yt/utilities/_amr_utils/grid_traversal.pyx
+++ b/yt/utilities/_amr_utils/grid_traversal.pyx
@@ -138,14 +138,15 @@
     cdef sample_function *sampler
     cdef public object avp_pos, avp_dir, acenter, aimage, ax_vec, ay_vec
     cdef void *supp_data
-    def __cinit__(self, 
+    def __init__(self, 
                   np.ndarray[np.float64_t, ndim=3] vp_pos,
                   np.ndarray vp_dir,
                   np.ndarray[np.float64_t, ndim=1] center,
                   bounds,
                   np.ndarray[np.float64_t, ndim=3] image,
                   np.ndarray[np.float64_t, ndim=1] x_vec,
-                  np.ndarray[np.float64_t, ndim=1] y_vec):
+                  np.ndarray[np.float64_t, ndim=1] y_vec,
+                  *args, **kwargs):
         self.image = <ImageContainer *> malloc(sizeof(ImageContainer))
         cdef ImageContainer *imagec = self.image
         self.sampler = NULL
@@ -247,6 +248,7 @@
         cdef int iter[4]
         cdef VolumeContainer *vc = pg.container
         cdef ImageContainer *im = self.image
+        self.setup(pg)
         if self.sampler == NULL: raise RuntimeError
         cdef np.float64_t *v_pos, v_dir[3], rgba[6], extrema[4]
         hit = 0
@@ -261,7 +263,6 @@
         cdef int nx = (iter[1] - iter[0])
         cdef int ny = (iter[3] - iter[2])
         cdef int size = nx * ny
-        self.setup(pg)
         if im.vd_strides[0] == -1:
             with nogil, parallel():
                 idata = <ImageAccumulator *> malloc(sizeof(ImageAccumulator))
@@ -316,12 +317,10 @@
     def setup(self, PartitionedGrid pg):
         self.sampler = projection_sampler
 
-
 cdef struct VolumeRenderAccumulator:
     int n_fits
     int n_samples
-    np.float64_t dvs[6]
-    FieldInterpolationTable *fits[6]
+    FieldInterpolationTable *fits
     int field_table_ids[6]
 
 @cython.boundscheck(False)
@@ -343,6 +342,7 @@
                     + index[1] * (vc.dims[2] + 1) + index[2]
     cdef np.float64_t slopes[6], dp[3], ds[3]
     cdef np.float64_t dt = (exit_t - enter_t) / vri.n_samples
+    cdef np.float64_t dvs[6]
     for i in range(3):
         dp[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
         dp[i] -= index[i] * vc.dds[i] + vc.left_edge[i]
@@ -359,12 +359,62 @@
         slopes[i] -= offset_interpolate(vc.dims, dp,
                          vc.data[i] + offset)
         slopes[i] *= -1.0/vri.n_samples
-        vri.dvs[i] = temp
+        dvs[i] = temp
     for dti in range(vri.n_samples): 
-        FIT_eval_transfer(dt, vri.dvs, im.rgba, vri.n_fits, vri.fits,
+        FIT_eval_transfer(dt, dvs, im.rgba, vri.n_fits, vri.fits,
                           vri.field_table_ids)
         for i in range(vc.n_fields):
-            vri.dvs[i] += slopes[i]
+            dvs[i] += slopes[i]
+
+cdef class VolumeRenderSampler(ImageSampler):
+    cdef VolumeRenderAccumulator *vra
+    cdef public object tf_obj
+    cdef public object my_field_tables
+    def __cinit__(self, 
+                  np.ndarray[np.float64_t, ndim=3] vp_pos,
+                  np.ndarray vp_dir,
+                  np.ndarray[np.float64_t, ndim=1] center,
+                  bounds,
+                  np.ndarray[np.float64_t, ndim=3] image,
+                  np.ndarray[np.float64_t, ndim=1] x_vec,
+                  np.ndarray[np.float64_t, ndim=1] y_vec,
+                  tf_obj, n_samples = 10):
+        ImageSampler.__init__(self, vp_pos, vp_dir, center, bounds, image,
+                               x_vec, y_vec)
+        cdef int i
+        cdef np.ndarray[np.float64_t, ndim=1] temp
+        # Now we handle tf_obj
+        self.vra = <VolumeRenderAccumulator *> \
+            malloc(sizeof(VolumeRenderAccumulator))
+        self.vra.fits = <FieldInterpolationTable *> \
+            malloc(sizeof(FieldInterpolationTable) * 6)
+        self.vra.n_fits = tf_obj.n_field_tables
+        print self.vra.n_fits
+        assert(self.vra.n_fits <= 6)
+        self.vra.n_samples = n_samples
+        self.my_field_tables = []
+        for i in range(self.vra.n_fits):
+            temp = tf_obj.tables[i].y
+            FIT_initialize_table(&self.vra.fits[i],
+                      temp.shape[0],
+                      <np.float64_t *> temp.data,
+                      tf_obj.tables[i].x_bounds[0],
+                      tf_obj.tables[i].x_bounds[1],
+                      tf_obj.field_ids[i], tf_obj.weight_field_ids[i],
+                      tf_obj.weight_table_ids[i])
+            self.my_field_tables.append((tf_obj.tables[i],
+                                         tf_obj.tables[i].y))
+        for i in range(6):
+            self.vra.field_table_ids[i] = tf_obj.field_table_ids[i]
+        self.supp_data = <void *> self.vra
+
+    def setup(self, PartitionedGrid pg):
+        self.sampler = volume_render_sampler
+
+    def __dealloc__(self):
+        return
+        free(self.vra.fits)
+        free(self.vra)
 
 cdef class GridFace:
     cdef int direction


diff -r f8667ca04152bbd97cec0521a6941c1390247e95 -r fc17095f5524360bfc75a33bc1f3eb8e5c776f9c yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -28,7 +28,8 @@
 import numpy as na
 from yt.funcs import *
 from yt.visualization.volume_rendering.grid_partitioner import HomogenizedVolume
-from yt.utilities.amr_utils import PartitionedGrid, kdtree_get_choices
+from yt.utilities.amr_utils import kdtree_get_choices
+from yt.utilities._amr_utils.grid_traversal import PartitionedGrid
 from yt.utilities.performance_counters import yt_counters, time_function
 from yt.utilities.parallel_tools.parallel_analysis_interface \
     import ParallelAnalysisInterface 
@@ -678,7 +679,7 @@
                 if na.any(current_node.r_corner-current_node.l_corner == 0):
                     current_node.brick = None
                 else:
-                    current_node.brick = PartitionedGrid(current_node.grid.id, len(self.fields), data,
+                    current_node.brick = PartitionedGrid(current_node.grid.id, data,
                                                          current_node.l_corner.copy(), 
                                                          current_node.r_corner.copy(), 
                                                          current_node.dims.astype('int64'))
@@ -708,7 +709,7 @@
                   current_node.li[1]:current_node.ri[1]+1,
                   current_node.li[2]:current_node.ri[2]+1].copy() for d in dds]
 
-        current_node.brick = PartitionedGrid(current_node.grid.id, len(self.fields), data,
+        current_node.brick = PartitionedGrid(current_node.grid.id, data,
                                              current_node.l_corner.copy(), 
                                              current_node.r_corner.copy(), 
                                              current_node.dims.astype('int64'))
@@ -1251,7 +1252,7 @@
                 if node.grid is not None:
                     data = [f["brick_%s_%s" %
                               (hex(i), field)][:].astype('float64') for field in self.fields]
-                    node.brick = PartitionedGrid(node.grid.id, len(self.fields), data,
+                    node.brick = PartitionedGrid(node.grid.id, data,
                                                  node.l_corner.copy(), 
                                                  node.r_corner.copy(), 
                                                  node.dims.astype('int64'))


diff -r f8667ca04152bbd97cec0521a6941c1390247e95 -r fc17095f5524360bfc75a33bc1f3eb8e5c776f9c yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -40,6 +40,9 @@
 from yt.utilities.amr_kdtree.api import AMRKDTree
 from numpy import pi
 
+from yt.utilities._amr_utils.grid_traversal import \
+    PartitionedGrid, ProjectionSampler, VolumeRenderSampler
+
 class Camera(ParallelAnalysisInterface):
     def __init__(self, center, normal_vector, width,
                  resolution, transfer_function,
@@ -318,11 +321,10 @@
         positions[:,:,1] = inv_mat[1,0]*px+inv_mat[1,1]*py+self.back_center[1]
         positions[:,:,2] = inv_mat[2,0]*px+inv_mat[2,1]*py+self.back_center[2]
         bounds = (px.min(), px.max(), py.min(), py.max())
-        vector_plane = VectorPlane(positions, self.box_vectors[2],
-                                      self.back_center, bounds, image,
-                                      self.unit_vectors[0],
-                                      self.unit_vectors[1])
-        return vector_plane
+        return (positions, self.box_vectors[2],
+                self.back_center, bounds, image,
+                self.unit_vectors[0],
+                self.unit_vectors[1])
 
     def snapshot(self, fn = None, clip_ratio = None):
         r"""Ray-cast the camera.
@@ -346,19 +348,20 @@
         """
         image = na.zeros((self.resolution[0], self.resolution[1], 3),
                          dtype='float64', order='C')
-        vector_plane = self.get_vector_plane(image)
-        tfp = TransferFunctionProxy(self.transfer_function) # Reset it every time
-        tfp.ns = self.sub_samples
+        args = self.get_vector_plane(image)
+        args = args + (self.transfer_function, self.sub_samples)
+        sampler = VolumeRenderSampler(*args)
         self.volume.initialize_source()
 
         pbar = get_pbar("Ray casting",
                         (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
         total_cells = 0
         for brick in self.volume.traverse(self.back_center, self.front_center, image):
-            brick.cast_plane(tfp, vector_plane)
+            sampler(brick)
             total_cells += na.prod(brick.my_data[0].shape)
             pbar.update(total_cells)
         pbar.finish()
+        image = sampler.aimage
 
         if self.comm.rank is 0 and fn is not None:
             if clip_ratio is not None:
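
In the camera.py hunk above, the TransferFunctionProxy/VectorPlane pair is replaced by a single sampler object built from the vector-plane tuple plus the transfer function, called once per brick, with the finished image read back from sampler.aimage. A schematic, self-contained sketch of that control flow; ToySampler and ToyBrick here are stand-ins, not yt classes:

    import numpy as np

    class ToyBrick:
        def __init__(self, data):
            self.my_data = [data]

    class ToySampler:
        # Stand-in for VolumeRenderSampler: holds the image, integrates one
        # brick per call, and exposes the accumulated result as .aimage.
        def __init__(self, image, transfer_function, sub_samples):
            self.aimage = image
            self.tf = transfer_function
            self.ns = sub_samples
        def __call__(self, brick):
            # A real sampler would cast rays through the brick's container here.
            self.aimage += self.tf(brick.my_data[0]).mean() * 0.01

    image = np.zeros((4, 4, 3))
    sampler = ToySampler(image, transfer_function=lambda d: d, sub_samples=5)
    total_cells = 0
    for brick in [ToyBrick(np.random.rand(8, 8, 8)) for _ in range(3)]:
        sampler(brick)                       # cast this brick's rays
        total_cells += np.prod(brick.my_data[0].shape)
    image = sampler.aimage
    print(total_cells, image[0, 0])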



https://bitbucket.org/yt_analysis/yt/changeset/476f05057264/
changeset:   476f05057264
branch:      yt
user:        MatthewTurk
date:        2011-12-12 21:13:07
summary:     Removed print statement
affected #:  1 file

diff -r fc17095f5524360bfc75a33bc1f3eb8e5c776f9c -r 476f0505726475cf755938b22031cdd20b7af9a7 yt/utilities/_amr_utils/grid_traversal.pyx
--- a/yt/utilities/_amr_utils/grid_traversal.pyx
+++ b/yt/utilities/_amr_utils/grid_traversal.pyx
@@ -389,7 +389,6 @@
         self.vra.fits = <FieldInterpolationTable *> \
             malloc(sizeof(FieldInterpolationTable) * 6)
         self.vra.n_fits = tf_obj.n_field_tables
-        print self.vra.n_fits
         assert(self.vra.n_fits <= 6)
         self.vra.n_samples = n_samples
         self.my_field_tables = []



https://bitbucket.org/yt_analysis/yt/changeset/c169377086b3/
changeset:   c169377086b3
branch:      yt
user:        MatthewTurk
date:        2011-12-13 05:02:49
summary:     First pass at dynamically calculating the position of vectors.
affected #:  2 files

diff -r 476f0505726475cf755938b22031cdd20b7af9a7 -r c169377086b364264176526a75b926c6c51a21fc yt/utilities/_amr_utils/grid_traversal.pyx
--- a/yt/utilities/_amr_utils/grid_traversal.pyx
+++ b/yt/utilities/_amr_utils/grid_traversal.pyx
@@ -138,14 +138,16 @@
     cdef sample_function *sampler
     cdef public object avp_pos, avp_dir, acenter, aimage, ax_vec, ay_vec
     cdef void *supp_data
+    cdef np.float64_t width[3]
     def __init__(self, 
-                  np.ndarray[np.float64_t, ndim=3] vp_pos,
+                  np.ndarray vp_pos,
                   np.ndarray vp_dir,
                   np.ndarray[np.float64_t, ndim=1] center,
                   bounds,
                   np.ndarray[np.float64_t, ndim=3] image,
                   np.ndarray[np.float64_t, ndim=1] x_vec,
                   np.ndarray[np.float64_t, ndim=1] y_vec,
+                  np.ndarray[np.float64_t, ndim=1] width,
                   *args, **kwargs):
         self.image = <ImageContainer *> malloc(sizeof(ImageContainer))
         cdef ImageContainer *imagec = self.image
@@ -165,19 +167,22 @@
         imagec.image = <np.float64_t *> image.data
         imagec.x_vec = <np.float64_t *> x_vec.data
         imagec.y_vec = <np.float64_t *> y_vec.data
-        imagec.nv[0] = vp_pos.shape[0]
-        imagec.nv[1] = vp_pos.shape[1]
+        imagec.nv[0] = image.shape[0]
+        imagec.nv[1] = image.shape[1]
         for i in range(4): imagec.bounds[i] = bounds[i]
         imagec.pdx = (bounds[1] - bounds[0])/imagec.nv[0]
         imagec.pdy = (bounds[3] - bounds[2])/imagec.nv[1]
         for i in range(3):
             imagec.vp_strides[i] = vp_pos.strides[i] / 8
             imagec.im_strides[i] = image.strides[i] / 8
+            self.width[i] = width[i]
         if vp_dir.ndim > 1:
             for i in range(3):
                 imagec.vd_strides[i] = vp_dir.strides[i] / 8
+        elif vp_pos.ndim == 1:
+            imagec.vd_strides[0] = imagec.vd_strides[1] = imagec.vd_strides[2] = -1
         else:
-            imagec.vd_strides[0] = imagec.vd_strides[1] = imagec.vd_strides[2] = -1
+            raise RuntimeError
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -254,15 +259,20 @@
         hit = 0
         self.calculate_extent(extrema, vc)
         self.get_start_stop(extrema, iter)
-        iter[0] = iclip(iter[0]-1, 0, im.nv[0])
-        iter[1] = iclip(iter[1]+1, 0, im.nv[0])
-        iter[2] = iclip(iter[2]-1, 0, im.nv[1])
-        iter[3] = iclip(iter[3]+1, 0, im.nv[1])
+        iter[0] = iclip(iter[0]-1, 0, im.nv[0]-1)
+        iter[1] = iclip(iter[1]+1, 0, im.nv[0]-1)
+        iter[2] = iclip(iter[2]-1, 0, im.nv[1]-1)
+        iter[3] = iclip(iter[3]+1, 0, im.nv[1]-1)
         cdef ImageAccumulator *idata
         cdef void *data
         cdef int nx = (iter[1] - iter[0])
         cdef int ny = (iter[3] - iter[2])
         cdef int size = nx * ny
+        cdef np.float64_t px, py 
+        cdef np.float64_t width[3] 
+        for i in range(3):
+            width[i] = self.width[i]
+        #print iter[0], iter[1], iter[2], iter[3], width[0], width[1], width[2]
         if im.vd_strides[0] == -1:
             with nogil, parallel():
                 idata = <ImageAccumulator *> malloc(sizeof(ImageAccumulator))
@@ -272,14 +282,19 @@
                     vj = j % ny
                     vi = (j - vj) / ny + iter[0]
                     vj = vj + iter[2]
-                    offset = im.vp_strides[0] * vi + im.vp_strides[1] * vj
-                    for i in range(3): v_pos[i] = im.vp_pos[i + offset]
+                    # Dynamically calculate the position
+                    px = width[0] * (<float>vi)/(<float>im.nv[0]) - width[0]/2.0
+                    py = width[1] * (<float>vj)/(<float>im.nv[1]) - width[1]/2.0
+                    v_pos[0] = im.vp_pos[0]*px + im.vp_pos[3]*py + im.vp_pos[9]
+                    v_pos[1] = im.vp_pos[1]*px + im.vp_pos[4]*py + im.vp_pos[10]
+                    v_pos[2] = im.vp_pos[2]*px + im.vp_pos[5]*py + im.vp_pos[11]
                     offset = im.im_strides[0] * vi + im.im_strides[1] * vj
                     for i in range(3): idata.rgba[i] = im.image[i + offset]
                     walk_volume(vc, v_pos, im.vp_dir, self.sampler,
                                 (<void *> idata))
                     for i in range(3): im.image[i + offset] = idata.rgba[i]
                 free(idata)
+                free(v_pos)
         else:
             # If we do not have an orthographic projection, we have to cast all
             # our rays (until we can get an extrema calculation...)
@@ -371,16 +386,17 @@
     cdef public object tf_obj
     cdef public object my_field_tables
     def __cinit__(self, 
-                  np.ndarray[np.float64_t, ndim=3] vp_pos,
+                  np.ndarray vp_pos,
                   np.ndarray vp_dir,
                   np.ndarray[np.float64_t, ndim=1] center,
                   bounds,
                   np.ndarray[np.float64_t, ndim=3] image,
                   np.ndarray[np.float64_t, ndim=1] x_vec,
                   np.ndarray[np.float64_t, ndim=1] y_vec,
+                  np.ndarray[np.float64_t, ndim=1] width,
                   tf_obj, n_samples = 10):
         ImageSampler.__init__(self, vp_pos, vp_dir, center, bounds, image,
-                               x_vec, y_vec)
+                               x_vec, y_vec, width)
         cdef int i
         cdef np.ndarray[np.float64_t, ndim=1] temp
         # Now we handle tf_obj


diff -r 476f0505726475cf755938b22031cdd20b7af9a7 -r c169377086b364264176526a75b926c6c51a21fc yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -348,8 +348,13 @@
         """
         image = na.zeros((self.resolution[0], self.resolution[1], 3),
                          dtype='float64', order='C')
-        args = self.get_vector_plane(image)
-        args = args + (self.transfer_function, self.sub_samples)
+        rotp = na.concatenate([self.inv_mat.ravel('F'), self.back_center.ravel()])
+        args = (rotp, self.box_vectors[2], self.back_center,
+                (-self.width[0]/2.0, self.width[0]/2.0,
+                 -self.width[1]/2.0, self.width[1]/2.0),
+                image, self.unit_vectors[0], self.unit_vectors[1],
+                na.array(self.width),
+                self.transfer_function, self.sub_samples)
         sampler = VolumeRenderSampler(*args)
         self.volume.initialize_source()
 



https://bitbucket.org/yt_analysis/yt/changeset/f16d86a762ae/
changeset:   f16d86a762ae
branch:      yt
user:        MatthewTurk
date:        2011-12-13 13:48:55
summary:     This fixes vector position calculation.
affected #:  1 file

diff -r c169377086b364264176526a75b926c6c51a21fc -r f16d86a762ae79cb99c27a944400ca7d58af93bd yt/utilities/_amr_utils/grid_traversal.pyx
--- a/yt/utilities/_amr_utils/grid_traversal.pyx
+++ b/yt/utilities/_amr_utils/grid_traversal.pyx
@@ -283,8 +283,8 @@
                     vi = (j - vj) / ny + iter[0]
                     vj = vj + iter[2]
                     # Dynamically calculate the position
-                    px = width[0] * (<float>vi)/(<float>im.nv[0]) - width[0]/2.0
-                    py = width[1] * (<float>vj)/(<float>im.nv[1]) - width[1]/2.0
+                    px = width[0] * (<float>vi)/(<float>im.nv[0]-1) - width[0]/2.0
+                    py = width[1] * (<float>vj)/(<float>im.nv[1]-1) - width[1]/2.0
                     v_pos[0] = im.vp_pos[0]*px + im.vp_pos[3]*py + im.vp_pos[9]
                     v_pos[1] = im.vp_pos[1]*px + im.vp_pos[4]*py + im.vp_pos[10]
                     v_pos[2] = im.vp_pos[2]*px + im.vp_pos[5]*py + im.vp_pos[11]
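
The fix changes the pixel normalization from nv to nv-1 so the first and last rows of pixels land exactly on the edges of the image plane. A small NumPy sketch of the full pixel-index to ray-origin mapping, assuming the rotp layout built in camera.py (inverse rotation raveled column-major, followed by the back center):

    import numpy as np

    def ray_origin(vi, vj, nv, width, rotp):
        # Pixel index -> image-plane coordinate, centered on the plane.
        px = width[0] * vi / (nv[0] - 1) - width[0] / 2.0
        py = width[1] * vj / (nv[1] - 1) - width[1] / 2.0
        # rotp[0:3] and rotp[3:6] are the two in-plane basis columns,
        # rotp[9:12] is the back center (rotp[6:9] is unused here).
        return rotp[0:3] * px + rotp[3:6] * py + rotp[9:12]

    inv_mat = np.eye(3)                        # identity orientation for the toy
    back_center = np.array([0.5, 0.5, 0.0])
    rotp = np.concatenate([inv_mat.ravel('F'), back_center])
    print(ray_origin(0, 0, nv=(16, 16), width=(1.0, 1.0, 1.0), rotp=rotp))
    print(ray_origin(15, 15, nv=(16, 16), width=(1.0, 1.0, 1.0), rotp=rotp))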



https://bitbucket.org/yt_analysis/yt/changeset/d91a8b84ec5a/
changeset:   d91a8b84ec5a
branch:      yt
user:        MatthewTurk
date:        2011-12-14 20:38:14
summary:     I think this fixes non-uniform projections like fisheye and healpix with the
threaded volume renderer.
affected #:  1 file

diff -r f16d86a762ae79cb99c27a944400ca7d58af93bd -r d91a8b84ec5ab4eeedda6502baf307da4b7eca12 yt/utilities/_amr_utils/grid_traversal.pyx
--- a/yt/utilities/_amr_utils/grid_traversal.pyx
+++ b/yt/utilities/_amr_utils/grid_traversal.pyx
@@ -201,22 +201,6 @@
         rv[2] = lrint((ex[2] - cy - im.bounds[2])/im.pdy)
         rv[3] = rv[2] + lrint((ex[3] - ex[2])/im.pdy)
 
-    cdef inline void copy_into(self, np.float64_t *fv, np.float64_t *tv,
-                        int i, int j, int nk, int strides[3]) nogil:
-        # We know the first two dimensions of our from-vector, and our
-        # to-vector is flat and 'ni' long
-        cdef int k
-        cdef int offset = strides[0] * i + strides[1] * j
-        for k in range(nk):
-            tv[k] = fv[offset + k]
-
-    cdef inline void copy_back(self, np.float64_t *fv, np.float64_t *tv,
-                        int i, int j, int nk, int strides[3]) nogil:
-        cdef int k
-        cdef int offset = strides[0] * i + strides[1] * j
-        for k in range(nk):
-            tv[offset + k] = fv[k]
-
     @cython.boundscheck(False)
     @cython.wraparound(False)
     cdef void calculate_extent(self, np.float64_t extrema[4],
@@ -255,7 +239,7 @@
         cdef ImageContainer *im = self.image
         self.setup(pg)
         if self.sampler == NULL: raise RuntimeError
-        cdef np.float64_t *v_pos, v_dir[3], rgba[6], extrema[4]
+        cdef np.float64_t *v_pos, *v_dir, rgba[6], extrema[4]
         hit = 0
         self.calculate_extent(extrema, vc)
         self.get_start_stop(extrema, iter)
@@ -273,11 +257,11 @@
         for i in range(3):
             width[i] = self.width[i]
         #print iter[0], iter[1], iter[2], iter[3], width[0], width[1], width[2]
-        if im.vd_strides[0] == -1:
-            with nogil, parallel():
-                idata = <ImageAccumulator *> malloc(sizeof(ImageAccumulator))
-                idata.supp_data = self.supp_data
-                v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        with nogil, parallel():
+            idata = <ImageAccumulator *> malloc(sizeof(ImageAccumulator))
+            idata.supp_data = self.supp_data
+            v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+            if im.vd_strides[0] == -1:
                 for j in prange(size, schedule="dynamic"):
                     vj = j % ny
                     vi = (j - vj) / ny + iter[0]
@@ -293,21 +277,24 @@
                     walk_volume(vc, v_pos, im.vp_dir, self.sampler,
                                 (<void *> idata))
                     for i in range(3): im.image[i + offset] = idata.rgba[i]
-                free(idata)
-                free(v_pos)
-        else:
-            # If we do not have an orthographic projection, we have to cast all
-            # our rays (until we can get an extrema calculation...)
-            idata = <ImageAccumulator *> malloc(sizeof(ImageAccumulator))
-            data = <void *> idata
-            for vi in range(im.nv[0]):
-                for vj in range(im.nv[1]):
-                    for i in range(4): idata.rgba[i] = 0.0
-                    self.copy_into(im.vp_pos, v_pos, vi, vj, 3, im.vp_strides)
-                    self.copy_into(im.image, idata.rgba, vi, vj, 3, im.im_strides)
-                    self.copy_into(im.vp_dir, v_dir, vi, vj, 3, im.vd_strides)
+            else:
+                # If we do not have a simple image plane, we have to cast all
+                # our rays 
+                v_dir = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+                for j in prange(size, schedule="dynamic"):
+                    vj = j % ny
+                    vi = (j - vj) / ny + iter[0]
+                    vj = vj + iter[2]
+                    offset = im.vp_strides[0] * vi + im.vp_strides[1] * vj
+                    for i in range(3): v_pos[i] = im.vp_dir[i + offset]
+                    offset = im.im_strides[0] * vi + im.im_strides[1] * vj
+                    for i in range(3): idata.rgba[i] = im.image[i + offset]
+                    offset = im.vd_strides[0] * vi + im.vd_strides[1] * vj
+                    for i in range(3): v_dir[i] = im.vp_dir[i + offset]
                     walk_volume(vc, v_pos, v_dir, self.sampler, data)
-                    self.copy_back(idata.rgba, im.image, vi, vj, 3, im.im_strides)
+                free(v_dir)
+            free(idata)
+            free(v_pos)
         return hit
 
 cdef void projection_sampler(
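
The restructuring above moves both cases inside one parallel nogil block: orthographic images share a single ray direction and compute positions on the fly, while non-uniform projections (fisheye, healpix) read a per-pixel position and direction from the strided arrays. Roughly, in pure-Python terms, with walk_ray standing in for walk_volume:

    import numpy as np

    def cast_all(vp_pos, vp_dir, image, walk_ray):
        ni, nj = image.shape[:2]
        orthographic = (vp_dir.ndim == 1)    # one direction for every pixel
        for vi in range(ni):
            for vj in range(nj):
                if orthographic:
                    v_pos = vp_pos[vi, vj]   # toy: precomputed; the real code
                    v_dir = vp_dir           # derives it from px, py
                else:
                    v_pos = vp_pos[vi, vj]
                    v_dir = vp_dir[vi, vj]
                image[vi, vj] += walk_ray(v_pos, v_dir)
        return image

    image = np.zeros((2, 2, 3))
    vp_pos = np.random.rand(2, 2, 3)
    print(cast_all(vp_pos, np.array([0.0, 0.0, 1.0]), image,
                   walk_ray=lambda p, d: p + d))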



https://bitbucket.org/yt_analysis/yt/changeset/43fc7bb777fb/
changeset:   43fc7bb777fb
branch:      geometry_handling
user:        MatthewTurk
date:        2011-12-16 12:09:37
summary:     Merging from volume_refactor bookmark
affected #:  33 files

diff -r bfcf3519e081e024b29b5b1806e6dbc6fcbd7805 -r 43fc7bb777fbdcbb681809ffe06a3c176534ea7f .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -8,6 +8,7 @@
 yt/utilities/amr_utils.c
 yt/utilities/kdtree/forthonf2c.h
 yt/utilities/libconfig_wrapper.c
+yt/utilities/spatial/ckdtree.c
 yt/utilities/_amr_utils/CICDeposit.c
 yt/utilities/_amr_utils/ContourFinding.c
 yt/utilities/_amr_utils/DepthFirstOctree.c


diff -r bfcf3519e081e024b29b5b1806e6dbc6fcbd7805 -r 43fc7bb777fbdcbb681809ffe06a3c176534ea7f scripts/iyt
--- a/scripts/iyt
+++ b/scripts/iyt
@@ -7,12 +7,14 @@
 
 doc = """\
 
-Welcome to yt!
+==================
+| Welcome to yt! |
+==================
 
 """
 
 try:
-    import IPython.Shell
+    import IPython
 except:
     print 'ipython is not available. using default python interpreter.'
     import code
@@ -20,7 +22,12 @@
     code.interact(doc, None, namespace)
     sys.exit()
 
-if "DISPLAY" in os.environ:
+if IPython.__version__.startswith("0.10"):
+    api_version = '0.10'
+elif IPython.__version__.startswith("0.11"):
+    api_version = '0.11'
+
+if api_version == "0.10" and "DISPLAY" in os.environ:
     from matplotlib import rcParams
     ipbackends = dict(Qt4 = IPython.Shell.IPShellMatplotlibQt4,
                       WX  = IPython.Shell.IPShellMatplotlibWX,
@@ -32,8 +39,15 @@
         ip_shell = ipbackends[bend](user_ns=namespace)
     except KeyError:
         ip_shell = IPython.Shell.IPShellMatplotlib(user_ns=namespace)
+elif api_version == "0.10":
+    ip_shell = IPython.Shell.IPShellMatplotlib(user_ns=namespace)
+elif api_version == "0.11":
+    from IPython.frontend.terminal.interactiveshell import TerminalInteractiveShell
+    ip_shell = TerminalInteractiveShell(user_ns=namespace, banner1 = doc,
+                    display_banner = True)
+    if "DISPLAY" in os.environ: ip_shell.enable_pylab(import_all=False)
 else:
-    ip_shell = IPython.Shell.IPShellMatplotlib(user_ns=namespace)
+    raise RuntimeError
 
 # The rest is a modified version of the IPython default profile code
 
@@ -58,220 +72,22 @@
 # Most of your config files and extensions will probably start with this import
 
 #import IPython.ipapi
-ip = ip_shell.IP.getapi()
+if api_version == "0.10":
+    ip = ip_shell.IP.getapi()
+    try_next = IPython.ipapi.TryNext
+    kwargs = dict(sys_exit=1, banner=doc)
+elif api_version == "0.11":
+    ip = ip_shell
+    try_next = IPython.core.error.TryNext
+    kwargs = dict()
 
-# You probably want to uncomment this if you did %upgrade -nolegacy
-# import ipy_defaults    
-
-import os   
-import glob
-import itertools
-
-def main():   
-
-    # uncomment if you want to get ipython -p sh behaviour
-    # without having to use command line switches  
-    # import ipy_profile_sh
-
-    # Configure your favourite editor?
-    # Good idea e.g. for %edit os.path.isfile
-
-    #import ipy_editors
-    
-    # Choose one of these:
-    
-    #ipy_editors.scite()
-    #ipy_editors.scite('c:/opt/scite/scite.exe')
-    #ipy_editors.komodo()
-    #ipy_editors.idle()
-    # ... or many others, try 'ipy_editors??' after import to see them
-    
-    # Or roll your own:
-    #ipy_editors.install_editor("c:/opt/jed +$line $file")
-    
-    
-    o = ip.options
-    # An example on how to set options
-    #o.autocall = 1
-    o.system_verbose = 0
-    
-    #import_all("os sys")
-    #execf('~/_ipython/ns.py')
-
-
-    # -- prompt
-    # A different, more compact set of prompts from the default ones, that
-    # always show your current location in the filesystem:
-
-    #o.prompt_in1 = r'\C_LightBlue[\C_LightCyan\Y2\C_LightBlue]\C_Normal\n\C_Green|\#>'
-    #o.prompt_in2 = r'.\D: '
-    #o.prompt_out = r'[\#] '
-    
-    # Try one of these color settings if you can't read the text easily
-    # autoexec is a list of IPython commands to execute on startup
-    #o.autoexec.append('%colors LightBG')
-    #o.autoexec.append('%colors NoColor')
-    #o.autoexec.append('%colors Linux')
-    
-    # for sane integer division that converts to float (1/2 == 0.5)
-    #o.autoexec.append('from __future__ import division')
-    
-    # For %tasks and %kill
-    #import jobctrl 
-    
-    # For autoreloading of modules (%autoreload, %aimport)    
-    #import ipy_autoreload
-    
-    # For winpdb support (%wdb)
-    #import ipy_winpdb
-    
-    # For bzr completer, requires bzrlib (the python installation of bzr)
-    #ip.load('ipy_bzr')
-    
-    # Tab completer that is not quite so picky (i.e. 
-    # "foo".<TAB> and str(2).<TAB> will work). Complete 
-    # at your own risk!
-    #import ipy_greedycompleter
-    
-from UserDict import UserDict
-class ParameterFileDict(UserDict):
-    def __init__(self):
-        # We accept no contributions
-        UserDict.__init__(self)
-        self._key_numbers = {}
-        self._nn = 0
-    def __setitem__(self, key, value):
-        if isinstance(key, int): raise KeyError
-        UserDict.__setitem__(self, key, value)
-        self._key_numbers[self._nn] = key
-        self._nn += 1
-    def __getitem__(self, key):
-        if isinstance(key, int):
-            return self[self._key_numbers[key]]
-        return UserDict.__getitem__(self, key)
-    def __iter__(self):
-        return itertools.chain(self.field_data.iterkeys(),
-                        self._key_numbers.iterkeys())
-    def __repr__(self):
-        s = "{" + ", \n ".join(
-                [" '(%s, %s)': %s" % (i, self._key_numbers[i], self[i])
-                    for i in sorted(self._key_numbers)]) + "}"
-        return s
-    def has_key(self, key):
-        return self.field_data.has_key(key) or self._key_numbers.has_key(key)
-    def keys(self):
-        return self.field_data.key(key) + self._key_numbers.key(key)
-
-pfs = ParameterFileDict()
-pcs = []
-ip.user_ns['pf'] = None
-ip.user_ns['pfs'] = pfs
-ip.user_ns['pc'] = None
-ip.user_ns['pcs'] = pcs
 ip.ex("from yt.mods import *")
 
-def do_pfall(self, arg):
-    if arg.strip() == "": arg = 0
-    for i in range(int(arg)+1):
-        for f in sorted(glob.glob("".join(["*/"]*i) + "*.hierarchy" )):
-            #print i, f
-            fn = f[:-10]
-            # Make this a bit smarter
-            ip.user_ns['pfs'][fn] = EnzoStaticOutput(fn)
-    ip.ex("print pfs")
-
-ip.expose_magic("pfall", do_pfall)
-
-def _parse_pf(arg):
-    if arg.strip() == "":
-        if ip.user_ns.get('pf', None) is not None:
-            return ip.user_ns['pf']
-        elif len(pfs) > 0:
-            return pfs[0]
-    else:
-        if pfs.has_key(arg):
-            return pfs[arg]
-        if pfs.has_key(int(arg)):
-            return pfs[int(arg)]
-        return EnzoStaticOutput(arg)
-    raise KeyError
-        
-def do_slice(self, arg):
-    pc = None
-    if len(arg.split()) == 3:
-        pfn, field, axis = arg.split()
-        pf = _parse_pf(arg.split()[0])
-    elif len(arg.split()) == 2:
-        field, axis = arg.split()
-        pf = _parse_pf("")
-        if ip.user_ns.get('pc', None) is not None and \
-           ip.user_ns['pc'].parameter_file is pf:
-            pf = ip.user_ns['pc']
-    else:
-        print "Need either two or three arguments."
-        return
-    axis = int(axis)
-    if pc is None: pc = PlotCollectionInteractive(pf)
-    pc.add_slice(field, axis)
-    print "Setting pcs[%s] = New PlotCollection" % len(pcs)
-    ip.user_ns['pcs'].append(pc)
-    if ip.user_ns.get('pc', None) is None: ip.user_ns['pc'] = pc
-    return pc
-
-ip.expose_magic("pcslicer", do_slice)
-
-def do_width(self, arg):
-    if ip.user_ns.get("pc", None) is None:
-        print "No 'pc' defined"
-        return
-    if len(arg.split()) == 2:
-        w, u = arg.split()
-    else:
-        w, u = arg, '1'
-    ip.user_ns['pc'].set_width(float(w), u)
-ip.expose_magic("width", do_width)
-
-def do_zoom(self, arg):
-    if ip.user_ns.get("pc", None) is None:
-        print "No 'pc' defined"
-        return
-    pc = ip.user_ns['pc']
-    w = None
-    for p in pc:
-        if hasattr(p, 'width'): w = p.width
-    if w is None: print "No zoomable plots defined"
-    w /= float(arg)
-    pc.set_width(w, '1')
-ip.expose_magic("zoom", do_zoom)
-    
-def do_setup_pf(self, arg):
-    if pfs.has_key(arg): ip.user_ns['pf'] = pfs[arg]
-    iarg = -1
-    try:
-        iarg = int(arg)
-    except ValueError: pass
-    if pfs.has_key(iarg): ip.user_ns['pf'] = pfs[iarg]
-    print ip.user_ns['pf']
-    
-ip.expose_magic("gpf", do_setup_pf)
-
-# some config helper functions you can use 
-def import_all(modules):
-    """ Usage: import_all("os sys") """ 
-    for m in modules.split():
-        ip.ex("from %s import *" % m)
-        
-def execf(fname):
-    """ Execute a file in user namespace """
-    ip.ex('execfile("%s")' % os.path.expanduser(fname))
-
-#main()
-
-
 # Now we add some tab completers, in the vein of:
 # http://pymel.googlecode.com/svn/trunk/tools/ipymel.py
 # We'll start with some fields.
 
+import re
 def yt_fieldname_completer(self, event):
     """Match dictionary completions"""
     #print "python_matches", event.symbol
@@ -284,7 +100,7 @@
     m = re.match(r"(\S+(\.\w+)*)\[[\'\\\"](\w*)$", text)
 
     if not m:
-        raise IPython.ipapi.TryNext 
+        raise try_next
     
     expr, attr = m.group(1, 3)
     #print "COMPLETING ON ", expr, attr
@@ -308,8 +124,8 @@
         return all_fields
 
 
-    raise IPython.ipapi.TryNext 
+    raise try_next
 
 ip.set_hook('complete_command', yt_fieldname_completer , re_key = ".*" )
 
-ip_shell.mainloop(sys_exit=1,banner=doc)
+ip_shell.mainloop(**kwargs)
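
The iyt changes above branch on IPython.__version__ to pick either the legacy 0.10 Shell API or the 0.11 TerminalInteractiveShell, and pass the banner through mainloop only where the old API expects it. A reduced sketch of the same version dispatch (just the selection logic, not the full script):

    import IPython

    if IPython.__version__.startswith("0.10"):
        api_version = "0.10"
    elif IPython.__version__.startswith("0.11"):
        api_version = "0.11"
    else:
        raise RuntimeError("unhandled IPython version %s" % IPython.__version__)

    namespace, doc = {}, "Welcome to yt!"
    if api_version == "0.10":
        shell = IPython.Shell.IPShellMatplotlib(user_ns=namespace)
        shell.mainloop(sys_exit=1, banner=doc)
    else:
        from IPython.frontend.terminal.interactiveshell import TerminalInteractiveShell
        shell = TerminalInteractiveShell(user_ns=namespace, banner1=doc,
                                         display_banner=True)
        shell.mainloop()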


diff -r bfcf3519e081e024b29b5b1806e6dbc6fcbd7805 -r 43fc7bb777fbdcbb681809ffe06a3c176534ea7f yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -1323,6 +1323,7 @@
     _name = "Loaded"
     
     def __init__(self, pf, basename):
+        ParallelAnalysisInterface.__init__(self)
         self.pf = pf
         self._groups = []
         self.basename = basename
@@ -1364,7 +1365,10 @@
             # Prepend the hdf5 file names with the full path.
             temp = []
             for item in line[1:]:
-                temp.append(self.pf.fullpath + '/' + item)
+                if item[0] == "/":
+                    temp.append(item)
+                else:
+                    temp.append(self.pf.fullpath + '/' + item)
             locations.append(temp)
         lines.close()
         return locations


diff -r bfcf3519e081e024b29b5b1806e6dbc6fcbd7805 -r 43fc7bb777fbdcbb681809ffe06a3c176534ea7f yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -464,6 +464,7 @@
                         parent_masses = na.concatenate((parent_masses, thisMasses))
                         parent_halos = na.concatenate((parent_halos, 
                             na.ones(thisIDs.size, dtype='int32') * gID))
+                        del thisIDs, thisMasses
                     h5fp.close()
             
             # Sort the arrays by particle index in ascending order.
@@ -495,6 +496,7 @@
                     child_masses = na.concatenate((child_masses, thisMasses))
                     child_halos = na.concatenate((child_halos, 
                         na.ones(thisIDs.size, dtype='int32') * gID))
+                    del thisIDs, thisMasses
                 h5fp.close()
         
         # Sort the arrays by particle index.
@@ -548,6 +550,7 @@
         parent_halos_tosend = parent_halos[parent_send]
         child_IDs_tosend = child_IDs[child_send]
         child_halos_tosend = child_halos[child_send]
+        del parent_send, child_send
         
         parent_IDs_tosend = self.comm.par_combine_object(parent_IDs_tosend,
                 datatype="array", op="cat")
@@ -651,6 +654,11 @@
             #values = tuple(values)
             self.write_values.append(values)
             self.write_values_dict[parent_currt][parent_halo] = values
+
+        # Clean up.
+        del parent_IDs, parent_masses, parent_halos
+        del parent_IDs_tosend, parent_masses_tosend
+        del parent_halos_tosend, child_IDs_tosend, child_halos_tosend
         
         return (child_IDs, child_masses, child_halos)
 


diff -r bfcf3519e081e024b29b5b1806e6dbc6fcbd7805 -r 43fc7bb777fbdcbb681809ffe06a3c176534ea7f yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -35,6 +35,7 @@
                  function=None, clump_info=None):
         self.parent = parent
         self.data = data
+        self.quantities = data.quantities
         self.field = field
         self.min_val = self.data[field].min()
         self.max_val = self.data[field].max()
@@ -167,6 +168,7 @@
     # unreliable in the unpickling
     for child in children: child.parent = obj
     obj.data = data[1] # Strip out the PF
+    obj.quantities = obj.data.quantities
     if obj.parent is None: return (data[0], obj)
     return obj
 


diff -r bfcf3519e081e024b29b5b1806e6dbc6fcbd7805 -r 43fc7bb777fbdcbb681809ffe06a3c176534ea7f yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -78,11 +78,11 @@
     and ensures that after the function is called, the field_parameters will
     be returned to normal.
     """
-    def save_state(self, grid, field=None):
+    def save_state(self, grid, field=None, *args, **kwargs):
         old_params = grid.field_parameters
         old_keys = grid.field_data.keys()
         grid.field_parameters = self.field_parameters
-        tr = func(self, grid, field)
+        tr = func(self, grid, field, *args, **kwargs)
         grid.field_parameters = old_params
         grid.field_data = YTFieldData( [(k, grid.field_data[k]) for k in old_keys] )
         return tr
@@ -322,8 +322,28 @@
             pass
         del self.field_data[key]
 
-    def _generate_field_in_grids(self, fieldName):
-        pass
+    def _generate_field(self, field):
+        if self.pf.field_info.has_key(field):
+            # First we check the validator
+            try:
+                self.pf.field_info[field].check_available(self)
+            except NeedsGridType, ngt_exception:
+                # We leave this to be implementation-specific
+                self._generate_field_in_grids(field, ngt_exception.ghost_zones)
+                return False
+            else:
+                self[field] = self.pf.field_info[field](self)
+                return True
+        else: # Can't find the field, try as it might
+            raise KeyError(field)
+
+    def _generate_field_in_grids(self, field, num_ghost_zones=0):
+        for grid in self._grids:
+            grid[field] = self.__touch_grid_field(grid, field)
+
+    @restore_grid_state
+    def __touch_grid_field(self, grid, field):
+        return grid[field]
 
     _key_fields = None
     def write_out(self, filename, fields=None, format="%0.16e"):
@@ -454,25 +474,6 @@
         self._sortkey = None
         self._sorted = {}
 
-    def _generate_field_in_grids(self, field, num_ghost_zones=0):
-        for grid in self._grids:
-            temp = grid[field]
-
-    def _generate_field(self, field):
-        if self.pf.field_info.has_key(field):
-            # First we check the validator
-            try:
-                self.pf.field_info[field].check_available(self)
-            except NeedsGridType, ngt_exception:
-                # We leave this to be implementation-specific
-                self._generate_field_in_grids(field, ngt_exception.ghost_zones)
-                return False
-            else:
-                self[field] = self.pf.field_info[field](self)
-                return True
-        else: # Can't find the field, try as it might
-            raise KeyError(field)
-
     def get_data(self, fields=None, in_grids=False):
         if self._grids == None:
             self._get_list_of_grids()
@@ -561,6 +562,7 @@
                 self.hierarchy.grid_right_edge)
         self._grids = self.hierarchy.grids[gi]
 
+    @restore_grid_state
     def _get_data_from_grid(self, grid, field):
         # We are orthogonal, so we can feel free to make assumptions
         # for the sake of speed.
@@ -634,6 +636,7 @@
                 self.hierarchy.grid_right_edge)
         self._grids = self.hierarchy.grids[gi]
 
+    @restore_grid_state
     def _get_data_from_grid(self, grid, field):
         mask = na.logical_and(self._get_cut_mask(grid),
                               grid.child_mask)
@@ -716,6 +719,7 @@
         p = na.all((min_streampoint <= RE) & (max_streampoint > LE), axis=1)
         self._grids = self.hierarchy.grids[p]
 
+    @restore_grid_state
     def _get_data_from_grid(self, grid, field):
         mask = na.logical_and(self._get_cut_mask(grid),
                               grid.child_mask)
@@ -805,25 +809,6 @@
         for field in temp_data.keys():
             self[field] = temp_data[field]
 
-    def _generate_field(self, field):
-        if self.pf.field_info.has_key(field):
-            # First we check the validator
-            try:
-                self.pf.field_info[field].check_available(self)
-            except NeedsGridType, ngt_exception:
-                # We leave this to be implementation-specific
-                self._generate_field_in_grids(field, ngt_exception.ghost_zones)
-                return False
-            else:
-                self[field] = self.pf.field_info[field](self)
-                return True
-        else: # Can't find the field, try as it might
-            raise KeyError(field)
-
-    def _generate_field_in_grids(self, field, num_ghost_zones=0):
-        for grid in self._grids:
-            temp = grid[field]
-
     def to_frb(self, width, resolution, center = None):
         if center is None:
             center = self.get_field_parameter("center")
@@ -1600,9 +1585,9 @@
         # _project_level, then it would be more memory conservative
         if self.preload_style == 'all':
             dependencies = self.get_dependencies(fields, ghost_zones = False)
-            print "Preloading %s grids and getting %s" % (
-                    len(self.source._get_grid_objs()),
-                    dependencies)
+            mylog.debug("Preloading %s grids and getting %s",
+                            len(self.source._get_grid_objs()),
+                            dependencies)
             self.comm.preload([g for g in self._get_grid_objs()],
                           dependencies, self.hierarchy.io)
         # By changing the remove-from-tree method to accumulate, we can avoid
@@ -2233,6 +2218,7 @@
                 dls[level].append(float(just_one(grid['d%s' % axis_names[self.axis]])))
         return dls
 
+    @restore_grid_state
     def _get_data_from_grid(self, grid, fields, dls):
         g_fields = [grid[field].astype("float64") for field in fields]
         c_fields = [self[field] for field in fields]
@@ -2370,29 +2356,6 @@
             grid[field] = new_field
             i += np
 
-    def _generate_field(self, field):
-        if self.pf.field_info.has_key(field):
-            # First we check the validator
-            try:
-                self.pf.field_info[field].check_available(self)
-            except NeedsGridType, ngt_exception:
-                # We leave this to be implementation-specific
-                self._generate_field_in_grids(field, ngt_exception.ghost_zones)
-                return False
-            else:
-                self[field] = self.pf.field_info[field](self)
-                return True
-        else: # Can't find the field, try as it might
-            raise KeyError(field)
-
-    def _generate_field_in_grids(self, field, num_ghost_zones=0):
-        for grid in self._grids:
-            self.__touch_grid_field(grid, field)
-
-    @restore_grid_state
-    def __touch_grid_field(self, grid, field):
-        grid[field]
-
     def _is_fully_enclosed(self, grid):
         return na.all(self._get_cut_mask)
 
@@ -2680,6 +2643,31 @@
     _type_name = "extracted_region"
     _con_args = ('_base_region', '_indices')
     def __init__(self, base_region, indices, force_refresh=True, **kwargs):
+        """An arbitrarily defined data container that allows for selection
+        of all data meeting certain criteria.
+
+        In order to create an arbitrarily selected set of data, the
+        ExtractedRegion takes a `base_region` and a set of `indices`
+        and creates a region within the `base_region` consisting of
+        all data indexed by the `indices`. Note that `indices` must be
+        precomputed. This does not work well for parallelized
+        operations.
+
+        Parameters
+        ----------
+        base_region : yt data source
+            A previously selected data source.
+        indices : array_like
+            An array of indices
+
+        Other Parameters
+        ----------------
+        force_refresh : bool
+           Force a refresh of the data. Defaults to True.
+        
+        Examples
+        --------
+        """
         cen = kwargs.pop("center", None)
         if cen is None: cen = base_region.get_field_parameter("center")
         AMR3DData.__init__(self, center=cen,
@@ -2963,10 +2951,22 @@
     _dx_pad = 0.5
     def __init__(self, center, left_edge, right_edge, fields = None,
                  pf = None, **kwargs):
-        """
-        We create an object with a set of three *left_edge* coordinates,
-        three *right_edge* coordinates, and a *center* that need not be the
-        center.
+        """A 3D region of data with an arbitrary center.
+
+        Takes an array of three *left_edge* coordinates, three
+        *right_edge* coordinates, and a *center* that can be anywhere
+        in the domain. If the selected region extends past the edges
+        of the domain, no data will be found there, though the
+        object's `left_edge` or `right_edge` are not modified.
+
+        Parameters
+        ----------
+        center : array_like
+            The center of the region
+        left_edge : array_like
+            The left edge of the region
+        right_edge : array_like
+            The right edge of the region
         """
         AMR3DData.__init__(self, center, fields, pf, **kwargs)
         self.left_edge = left_edge
@@ -3011,10 +3011,25 @@
     _dx_pad = 0.5
     def __init__(self, center, left_edge, right_edge, fields = None,
                  pf = None, **kwargs):
-        """
-        We create an object with a set of three *left_edge* coordinates,
-        three *right_edge* coordinates, and a *center* that need not be the
-        center.
+        """A 3D region of data that with periodic boundary
+        conditions if the selected region extends beyond the
+        simulation domain.
+
+        Takes an array of three *left_edge* coordinates, three
+        *right_edge* coordinates, and a *center* that can be anywhere
+        in the domain. The selected region can extend past the edges
+        of the domain, in which case periodic boundary conditions will
+        be applied to fill the region.
+
+        Parameters
+        ----------
+        center : array_like
+            The center of the region
+        left_edge : array_like
+            The left edge of the region
+        right_edge : array_like
+            The right edge of the region
+
         """
         AMR3DData.__init__(self, center, fields, pf, **kwargs)
         self.left_edge = na.array(left_edge)
@@ -3062,6 +3077,15 @@
     """
     _type_name = "periodic_region_strict"
     _dx_pad = 0.0
+    def __init__(self, center, left_edge, right_edge, fields = None,
+                 pf = None, **kwargs):
+        """same as periodic region, but does not include cells unless
+        the selected region encompasses their centers.
+
+        """
+        AMRPeriodicRegionBase.__init__(self, center, left_edge, right_edge, 
+                                       fields = None, pf = None, **kwargs)
+    
 
 class AMRGridCollectionBase(AMR3DData):
     """
@@ -3103,9 +3127,20 @@
     _type_name = "sphere"
     _con_args = ('center', 'radius')
     def __init__(self, center, radius, fields = None, pf = None, **kwargs):
-        """
-        The most famous of all the data objects, we define it via a
-        *center* and a *radius*.
+        """A sphere f points defined by a *center* and a *radius*.
+
+        Parameters
+        ----------
+        center : array_like
+            The center of the sphere.
+        radius : float
+            The radius of the sphere.
+
+        Examples
+        --------
+        >>> pf = load("DD0010/moving7_0010")
+        >>> c = [0.5,0.5,0.5]
+        >>> sphere = pf.h.sphere(c,1.*pf['kpc'])
         """
         AMR3DData.__init__(self, center, fields, pf, **kwargs)
         # Unpack the radius, if necessary
@@ -3153,6 +3188,19 @@
     _con_args = ('level', 'left_edge', 'right_edge', 'ActiveDimensions')
     def __init__(self, level, left_edge, dims, fields = None,
                  pf = None, num_ghost_zones = 0, use_pbar = True, **kwargs):
+        """A 3D region with all data extracted to a single, specified
+        resolution.
+        
+        Parameters
+        ----------
+        level : int
+            The resolution level data is uniformly gridded at
+        left_edge : array_like
+            The left edge of the region to be extracted
+        right_edge : array_like
+            The right edge of the region to be extracted
+
+        """
         AMR3DData.__init__(self, center=kwargs.pop("center", None),
                            fields=fields, pf=pf, **kwargs)
         self.left_edge = na.array(left_edge)
@@ -3286,6 +3334,24 @@
     _type_name = "smoothed_covering_grid"
     @wraps(AMRCoveringGridBase.__init__)
     def __init__(self, *args, **kwargs):
+        """A 3D region with all data extracted and interpolated to a
+        single, specified resolution.
+
+        Smoothed covering grids start at level 0, interpolating to
+        fill the region to level 1, replacing any cells actually
+        covered by level 1 data, and then recursively repeating this
+        process until it reaches the specified `level`.
+        
+        Parameters
+        ----------
+        level : int
+            The resolution level data is uniformly gridded at
+        left_edge : array_like
+            The left edge of the region to be extracted
+        right_edge : array_like
+            The right edge of the region to be extracted
+
+        """
         self._base_dx = (
               (self.pf.domain_right_edge - self.pf.domain_left_edge) /
                self.pf.domain_dimensions.astype("float64"))
@@ -3378,6 +3444,7 @@
                                    output_field, output_left)
             self.field_data[field] = output_field
 
+    @restore_grid_state
     def _get_data_from_grid(self, grid, fields):
         fields = ensure_list(fields)
         g_fields = [grid[field].astype("float64") for field in fields]
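
Several _get_data_from_grid methods above gain the @restore_grid_state decorator, and the underlying save_state wrapper now forwards *args/**kwargs. A self-contained sketch of that save/restore pattern, using toy classes rather than yt's grid objects:

    from functools import wraps

    def restore_state(func):
        # Swap the data object's field parameters onto the grid for the call,
        # then restore the grid's original parameters and cached fields.
        @wraps(func)
        def save_state(self, grid, field=None, *args, **kwargs):
            old_params = grid.field_parameters
            old_data = dict(grid.field_data)
            grid.field_parameters = self.field_parameters
            try:
                return func(self, grid, field, *args, **kwargs)
            finally:
                grid.field_parameters = old_params
                grid.field_data = old_data
        return save_state

    class ToyGrid:
        def __init__(self):
            self.field_parameters = {"center": None}
            self.field_data = {}

    class ToyDataObject:
        field_parameters = {"center": (0.5, 0.5, 0.5)}
        @restore_state
        def _get_data_from_grid(self, grid, field, scale=1.0):
            return grid.field_parameters["center"], scale

    print(ToyDataObject()._get_data_from_grid(ToyGrid(), "Density", scale=2.0))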


diff -r bfcf3519e081e024b29b5b1806e6dbc6fcbd7805 -r 43fc7bb777fbdcbb681809ffe06a3c176534ea7f yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -151,24 +151,11 @@
     """
     baryon_mass = data["CellMassMsun"].sum()
     particle_mass = data["ParticleMassMsun"].sum()
-    return baryon_mass, particle_mass
-def _combTotalMass(data, baryon_mass, particle_mass):
-    return baryon_mass.sum() + particle_mass.sum()
+    return [baryon_mass + particle_mass]
+def _combTotalMass(data, total_mass):
+    return total_mass.sum()
 add_quantity("TotalMass", function=_TotalMass,
-             combine_function=_combTotalMass, n_ret = 2)
-
-def _MatterMass(data):
-    """
-    This function takes no arguments and returns the array sum of cell masses
-    and particle masses.
-    """
-    cellvol = data["CellVolume"]
-    matter_rho = data["Matter_Density"]
-    return cellvol, matter_rho 
-def _combMatterMass(data, cellvol, matter_rho):
-    return cellvol*matter_rho
-add_quantity("MatterMass", function=_MatterMass,
-	     combine_function=_combMatterMass, n_ret=2)
+             combine_function=_combTotalMass, n_ret=1)
 
 def _CenterOfMass(data, use_cells=True, use_particles=False):
     """
@@ -358,7 +345,7 @@
     Add the mass contribution of particles if include_particles = True
     """
     if (include_particles):
-	mass_to_use = data.quantities["MatterMass"]()[0] 
+	mass_to_use = data["TotalMass"]
     else:
 	mass_to_use = data["CellMass"]
     kinetic = 0.5 * (mass_to_use * (
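
The TotalMass change above collapses the quantity to a single per-chunk partial (n_ret = 1) that the combine function sums. A sketch of that per-chunk/combine shape in plain Python, illustrating the pattern rather than yt's add_quantity signature:

    import numpy as np

    def total_mass(chunk):
        # Per-chunk piece: one partial sum per chunk (n_ret = 1).
        return [chunk["CellMassMsun"].sum() + chunk["ParticleMassMsun"].sum()]

    def comb_total_mass(partials):
        # Combine step: sum the per-chunk partial results.
        return np.asarray(partials).sum()

    chunks = [{"CellMassMsun": np.ones(4), "ParticleMassMsun": np.ones(4)},
              {"CellMassMsun": 2 * np.ones(3), "ParticleMassMsun": np.zeros(3)}]
    partials = [total_mass(c)[0] for c in chunks]
    print(comb_total_mass(partials))   # 8 + 6 = 14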


diff -r bfcf3519e081e024b29b5b1806e6dbc6fcbd7805 -r 43fc7bb777fbdcbb681809ffe06a3c176534ea7f yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -91,11 +91,10 @@
         # Because we need an instantiated class to check the pf's existence in
         # the cache, we move that check to here from __new__.  This avoids
         # double-instantiation.
-        if ytcfg.getboolean('yt', 'serialize'):
-            try:
-                _pf_store.check_pf(self)
-            except NoParameterShelf:
-                pass
+        try:
+            _pf_store.check_pf(self)
+        except NoParameterShelf:
+            pass
         self.print_key_parameters()
 
         self.create_field_info()


diff -r bfcf3519e081e024b29b5b1806e6dbc6fcbd7805 -r 43fc7bb777fbdcbb681809ffe06a3c176534ea7f yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -112,7 +112,8 @@
         outputs = []
         for line in open(output_log):
             if not line.startswith(line_prefix): continue
-            fn = line[len(line_prefix):].strip()
+            cut_line = line[len(line_prefix):].strip()
+            fn = cut_line.split()[0]
             outputs.append(load(fn))
         obj = cls(outputs)
         return obj


diff -r bfcf3519e081e024b29b5b1806e6dbc6fcbd7805 -r 43fc7bb777fbdcbb681809ffe06a3c176534ea7f yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -30,8 +30,6 @@
 import inspect
 import copy
 
-from math import pi
-
 from yt.funcs import *
 
 from yt.utilities.amr_utils import CICDeposit_3, obtain_rvec
@@ -266,7 +264,7 @@
     M{sqrt(3pi/(16*G*rho))} or M{sqrt(3pi/(16G))*rho^-(1/2)}
     Note that we return in our natural units already
     """
-    return (3.0*pi/(16*G*data["Density"]))**(1./2.)
+    return (3.0*na.pi/(16*G*data["Density"]))**(1./2.)
 add_field("DynamicalTime", function=_DynamicalTime,
            units=r"\rm{s}")
 
@@ -295,6 +293,7 @@
 
 def _TotalMass(field,data):
     return (data["Density"]+data["Dark_Matter_Density"]) * data["CellVolume"]
+add_field("TotalMass", function=_TotalMass, units=r"\rm{g}")
 add_field("TotalMassMsun", units=r"M_{\odot}",
           function=_TotalMass,
           convert_function=_convertCellMassMsun)


diff -r bfcf3519e081e024b29b5b1806e6dbc6fcbd7805 -r 43fc7bb777fbdcbb681809ffe06a3c176534ea7f yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -575,24 +575,24 @@
         reverse_tree = self.enzo.hierarchy_information["GridParentIDs"].ravel().tolist()
         # Initial setup:
         mylog.debug("Reconstructing parent-child relationships")
-        self.grids = []
+        grids = []
         # We enumerate, so it's 0-indexed id and 1-indexed pid
         self.filenames = ["-1"] * self.num_grids
         for id,pid in enumerate(reverse_tree):
-            self.grids.append(self.grid(id+1, self))
-            self.grids[-1].Level = self.grid_levels[id, 0]
+            grids.append(self.grid(id+1, self))
+            grids[-1].Level = self.grid_levels[id, 0]
             if pid > 0:
-                self.grids[-1]._parent_id = pid
-                self.grids[pid-1]._children_ids.append(self.grids[-1].id)
+                grids[-1]._parent_id = pid
+                grids[pid-1]._children_ids.append(grids[-1].id)
         self.max_level = self.grid_levels.max()
         mylog.debug("Preparing grids")
         self.grids = na.empty(len(grids), dtype='object')
-        for i, grid in enumerate(self.grids):
+        for i, grid in enumerate(grids):
             if (i%1e4) == 0: mylog.debug("Prepared % 7i / % 7i grids", i, self.num_grids)
             grid.filename = None
             grid._prepare_grid()
             grid.proc_num = self.grid_procs[i,0]
-            self.grids[gi] = grid
+            self.grids[i] = grid
         mylog.debug("Prepared")
 
     def _initialize_grid_arrays(self):


diff -r bfcf3519e081e024b29b5b1806e6dbc6fcbd7805 -r 43fc7bb777fbdcbb681809ffe06a3c176534ea7f yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -136,14 +136,25 @@
         offset = 7
         ii = na.argsort(self.grid_levels.flat)
         gid = self._handle["/gid"][:]
+        first_ind = -(self.parameter_file.refine_by**self.parameter_file.dimensionality)
         for g in self.grids[ii].flat:
             gi = g.id - g._id_offset
             # FLASH uses 1-indexed group info
-            g.Children = [self.grids[i - 1] for i in gid[gi,7:] if i > -1]
+            g.Children = [self.grids[i - 1] for i in gid[gi,first_ind:] if i > -1]
             for g1 in g.Children:
                 g1.Parent = g
             g._prepare_grid()
             g._setup_dx()
+        if self.parameter_file.dimensionality < 3:
+            DD = (self.parameter_file.domain_right_edge[2] -
+                  self.parameter_file.domain_left_edge[2])
+            for g in self.grids:
+                g.dds[2] = DD
+        if self.parameter_file.dimensionality < 2:
+            DD = (self.parameter_file.domain_right_edge[1] -
+                  self.parameter_file.domain_left_edge[1])
+            for g in self.grids:
+                g.dds[1] = DD
         self.max_level = self.grid_levels.max()
 
     def _setup_derived_fields(self):
@@ -183,7 +194,6 @@
         # These should be explicitly obtained from the file, but for now that
         # will wait until a reorganization of the source tree and better
         # generalization.
-        self.dimensionality = 3
         self.refine_by = 2
         self.parameters["HydroMethod"] = 'flash' # always PPM DE
         self.parameters["Time"] = 1. # default unit is 1...
@@ -255,6 +265,7 @@
     def _find_parameter(self, ptype, pname, scalar = False):
         nn = "/%s %s" % (ptype,
                 {False: "runtime parameters", True: "scalars"}[scalar])
+        if nn not in self._handle: raise KeyError(nn)
         for tpname, pval in self._handle[nn][:]:
             if tpname.strip() == pname:
                 return pval
@@ -281,12 +292,20 @@
             nxb = self._find_parameter("integer", "nxb", scalar = True)
             nyb = self._find_parameter("integer", "nyb", scalar = True)
             nzb = self._find_parameter("integer", "nzb", scalar = True)
+            dimensionality = self._find_parameter("integer", "dimensionality",
+                                    scalar = True)
         except KeyError:
             nxb, nyb, nzb = [int(self._handle["/simulation parameters"]['n%sb' % ax])
                               for ax in 'xyz']
+            dimensionality = 3
+            if nzb == 1: dimensionality = 2
+            if nyb == 1: dimensionality = 1
+            if dimensionality < 3:
+                mylog.warning("Guessing dimensionality as %s", dimensionality)
         nblockx = self._find_parameter("integer", "nblockx")
         nblocky = self._find_parameter("integer", "nblockx")
         nblockz = self._find_parameter("integer", "nblockx")
+        self.dimensionality = dimensionality
         self.domain_dimensions = \
             na.array([nblockx*nxb,nblocky*nyb,nblockz*nzb])
 
@@ -299,7 +318,7 @@
 
         try:
             use_cosmo = self._find_parameter("logical", "usecosmology") 
-        except KeyError:
+        except:
             use_cosmo = 0
 
         if use_cosmo == 1:
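The FLASH changes stop hard-coding dimensionality = 3: the reader now asks the file for a "dimensionality" scalar and, if that parameter is missing, guesses it from the block sizes (nzb == 1 means 2D, nyb == 1 means 1D), then pads g.dds along the collapsed axes with the full domain width. The slice of /gid holding the children is likewise taken from the end using refine_by**dimensionality entries rather than assuming the 3D layout. The guessing step on its own (standalone sketch, not from the changeset):

    def guess_dimensionality(nxb, nyb, nzb):
        # a block that is a single zone thick along an axis collapses that axis
        dimensionality = 3
        if nzb == 1: dimensionality = 2
        if nyb == 1: dimensionality = 1
        return dimensionality

    guess_dimensionality(8, 8, 1)   # -> 2
    guess_dimensionality(8, 1, 1)   # -> 1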


diff -r bfcf3519e081e024b29b5b1806e6dbc6fcbd7805 -r 43fc7bb777fbdcbb681809ffe06a3c176534ea7f yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -124,14 +124,14 @@
     """
     Returning resident size in megabytes
     """
+    pid = os.getpid()
     try:
         pagesize = resource.getpagesize()
     except NameError:
-        return 0
-    pid = os.getpid()
+        return float(os.popen('ps -o rss= -p %d' % pid).read()) / 1024
     status_file = "/proc/%s/statm" % (pid)
     if not os.path.isfile(status_file):
-        return 0.0
+        return float(os.popen('ps -o rss= -p %d' % pid).read()) / 1024
     line = open(status_file).read()
     size, resident, share, text, library, data, dt = [int(i) for i in line.split()]
     return resident * pagesize / (1024 * 1024) # return in megs
@@ -255,16 +255,32 @@
     *num_up* refers to how many frames of the stack get stripped off, and
     defaults to 1 so that this function itself is stripped off.
     """
-    from IPython.Shell import IPShellEmbed
+
+    import IPython
+    if IPython.__version__.startswith("0.10"):
+       api_version = '0.10'
+    elif IPython.__version__.startswith("0.11"):
+       api_version = '0.11'
+
     stack = inspect.stack()
     frame = inspect.stack()[num_up]
     loc = frame[0].f_locals.copy()
     glo = frame[0].f_globals
     dd = dict(fname = frame[3], filename = frame[1],
               lineno = frame[2])
-    ipshell = IPShellEmbed()
-    ipshell(header = __header % dd,
-            local_ns = loc, global_ns = glo)
+    if api_version == '0.10':
+        ipshell = IPython.Shell.IPShellEmbed()
+        ipshell(header = __header % dd,
+                local_ns = loc, global_ns = glo)
+    else:
+        from IPython.config.loader import Config
+        cfg = Config()
+        cfg.InteractiveShellEmbed.local_ns = loc
+        cfg.InteractiveShellEmbed.global_ns = glo
+        IPython.embed(config=cfg, banner2 = __header % dd)
+        from IPython.frontend.terminal.embed import InteractiveShellEmbed
+        ipshell = InteractiveShellEmbed(config=cfg)
+
     del ipshell
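Two separate fixes land in funcs.py. get_memory_usage no longer returns 0 when the resource module or /proc is unavailable; both fallbacks shell out to ps instead, so OS X and similar platforms report a real resident size. insert_ipython now branches on IPython.__version__ so both the 0.10 Shell API and the 0.11 embed API keep working. The ps fallback on its own (hypothetical helper name, not from the changeset):

    import os

    def resident_size_mb(pid=None):
        # "ps -o rss=" prints the resident set size in kilobytes
        pid = pid if pid is not None else os.getpid()
        return float(os.popen('ps -o rss= -p %d' % pid).read()) / 1024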
 
 


diff -r bfcf3519e081e024b29b5b1806e6dbc6fcbd7805 -r 43fc7bb777fbdcbb681809ffe06a3c176534ea7f yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -133,9 +133,9 @@
     my_plugin_name = ytcfg.get("yt","pluginfilename")
     # We assume that it is with respect to the $HOME/.yt directory
     if os.path.isfile(my_plugin_name):
-        fn = my_plugin_name
+        _fn = my_plugin_name
     else:
-        fn = os.path.expanduser("~/.yt/%s" % my_plugin_name)
-    if os.path.isfile(fn):
-        mylog.info("Loading plugins from %s", fn)
-        execfile(fn)
+        _fn = os.path.expanduser("~/.yt/%s" % my_plugin_name)
+    if os.path.isfile(_fn):
+        mylog.info("Loading plugins from %s", _fn)
+        execfile(_fn)
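The only change in mods.py is renaming fn to _fn. Assuming the module defines no __all__, the leading underscore keeps the temporary plugin path out of what "from yt.mods import *" re-exports into user scripts:

    # without an __all__, star imports skip names that begin with an underscore
    _fn = "/home/user/.yt/my_plugins.py"   # illustrative path; stays private to yt.mods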


diff -r bfcf3519e081e024b29b5b1806e6dbc6fcbd7805 -r 43fc7bb777fbdcbb681809ffe06a3c176534ea7f yt/utilities/_amr_utils/VolumeIntegrator.pyx
--- a/yt/utilities/_amr_utils/VolumeIntegrator.pyx
+++ b/yt/utilities/_amr_utils/VolumeIntegrator.pyx
@@ -29,8 +29,31 @@
 cimport kdtree_utils
 cimport healpix_interface
 from stdlib cimport malloc, free, abs
-from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip
-from ray_handling cimport integrate_ray, ray_sampler
+
+cdef inline int imax(int i0, int i1):
+    if i0 > i1: return i0
+    return i1
+
+cdef inline np.float64_t fmax(np.float64_t f0, np.float64_t f1):
+    if f0 > f1: return f0
+    return f1
+
+cdef inline int imin(int i0, int i1):
+    if i0 < i1: return i0
+    return i1
+
+cdef inline np.float64_t fmin(np.float64_t f0, np.float64_t f1):
+    if f0 < f1: return f0
+    return f1
+
+cdef inline int iclip(int i, int a, int b):
+    if i < a: return a
+    if i > b: return b
+    return i
+
+cdef inline np.float64_t fclip(np.float64_t f,
+                      np.float64_t a, np.float64_t b):
+    return fmin(fmax(f, a), b)
 
 cdef extern from "math.h":
     double exp(double x)
@@ -288,9 +311,6 @@
     cdef int n_fields
     cdef int n_field_tables
     cdef public int ns
-    cdef int grad
-    cdef np.float64_t light_source_v[3]
-    cdef np.float64_t light_source_c[3]
 
     # These are the field tables and their affiliated storage.
     # We have one field_id for every table.  Note that a single field can
@@ -345,19 +365,13 @@
         for i in range(6):
             self.field_table_ids[i] = tf_obj.field_table_ids[i]
             #print "Channel", i, "corresponds to", self.field_table_ids[i]
-
-        self.grad = tf_obj.grad_field
-        for i in range(3):
-            self.light_source_v[i] = tf_obj.light_source_v[i]
-            self.light_source_c[i] = tf_obj.light_source_c[i]
             
     @cython.boundscheck(False)
     @cython.wraparound(False)
-    @cython.cdivision(True)
     cdef void eval_transfer(self, np.float64_t dt, np.float64_t *dvs,
                                   np.float64_t *rgba, np.float64_t *grad):
         cdef int i, fid, use
-        cdef np.float64_t ta, tf, istorage[6], trgba[6], dot_prod, normalize
+        cdef np.float64_t ta, tf, istorage[6], trgba[6], dot_prod
         # NOTE: We now disable this.  I have left it to ease the process of
         # potentially, one day, re-including it.
         #use = 0
@@ -377,13 +391,6 @@
         for i in range(6):
             trgba[i] = istorage[self.field_table_ids[i]]
             #print i, trgba[i],
-        if self.grad != -1:
-            dot_prod = 0.0
-            for i in range(3):
-                dot_prod += grad[i] * self.light_source_v[i]
-            if dot_prod < 0: dot_prod = 0.0
-            for i in range(3):
-                trgba[i] *= dot_prod * self.light_source_c[i]
         #print
         # A few words on opacity.  We're going to be integrating equation 1.23
         # from Rybicki & Lightman.  dI_\nu / ds = -\alpha_\nu I_\nu + j_\nu
@@ -482,13 +489,6 @@
 
 cdef struct AdaptiveRayPacket
 
-cdef class PartitionedGrid
-
-cdef struct VolumeRendererData:
-    np.float64_t *rgba
-    TransferFunctionProxy tf
-    PartitionedGrid pg
-
 cdef class PartitionedGrid:
     cdef public object my_data
     cdef public object LeftEdge
@@ -604,21 +604,186 @@
                     if temp > extrema[3]: extrema[3] = temp
         #print extrema[0], extrema[1], extrema[2], extrema[3]
 
-    cdef int integrate_ray(np.float64_t left_edge[3],
-                           np.float64_t right_edge[3],
-                           np.float64_t dds[3],
-                           np.float64_t idds[3],
-                           int dims[3],
-                           np.float64_t v_pos[3],
-                           np.float64_t v_dir[3],
-                           np.float64_t rgba[4],
-                           TransferFunctionProxy tf,
-                           np.float64_t *return_t = NULL,
-                           np.float64_t enter_t = -1.0):
-        integrate_ray(self.left_edge, self.right_edge,
-                      self.dds, self.idds, self.dims, v_pos,
-                      v_dir, rgba, tf, return_t, enter_t,
-                      self.sample_value)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    cdef int integrate_ray(self, np.float64_t v_pos[3],
+                                 np.float64_t v_dir[3],
+                                 np.float64_t rgba[4],
+                                 TransferFunctionProxy tf,
+                                 np.float64_t *return_t = NULL,
+                                 np.float64_t enter_t = -1.0):
+        cdef int cur_ind[3], step[3], x, y, i, n, flat_ind, hit, direction
+        cdef np.float64_t intersect_t = 1.0
+        cdef np.float64_t iv_dir[3]
+        cdef np.float64_t intersect[3], tmax[3], tdelta[3]
+        cdef np.float64_t dist, alpha, dt, exit_t
+        cdef np.float64_t tr, tl, temp_x, temp_y, dv
+        for i in range(3):
+            if (v_dir[i] < 0):
+                step[i] = -1
+            elif (v_dir[i] == 0):
+                step[i] = 1
+                tmax[i] = 1e60
+                iv_dir[i] = 1e60
+                tdelta[i] = 1e-60
+                continue
+            else:
+                step[i] = 1
+            x = (i+1) % 3
+            y = (i+2) % 3
+            iv_dir[i] = 1.0/v_dir[i]
+            tl = (self.left_edge[i] - v_pos[i])*iv_dir[i]
+            temp_x = (v_pos[x] + tl*v_dir[x])
+            temp_y = (v_pos[y] + tl*v_dir[y])
+            if self.left_edge[x] <= temp_x and temp_x <= self.right_edge[x] and \
+               self.left_edge[y] <= temp_y and temp_y <= self.right_edge[y] and \
+               0.0 <= tl and tl < intersect_t:
+                direction = i
+                intersect_t = tl
+            tr = (self.right_edge[i] - v_pos[i])*iv_dir[i]
+            temp_x = (v_pos[x] + tr*v_dir[x])
+            temp_y = (v_pos[y] + tr*v_dir[y])
+            if self.left_edge[x] <= temp_x and temp_x <= self.right_edge[x] and \
+               self.left_edge[y] <= temp_y and temp_y <= self.right_edge[y] and \
+               0.0 <= tr and tr < intersect_t:
+                direction = i
+                intersect_t = tr
+        if self.left_edge[0] <= v_pos[0] and v_pos[0] <= self.right_edge[0] and \
+           self.left_edge[1] <= v_pos[1] and v_pos[1] <= self.right_edge[1] and \
+           self.left_edge[2] <= v_pos[2] and v_pos[2] <= self.right_edge[2]:
+            intersect_t = 0.0
+        if enter_t >= 0.0: intersect_t = enter_t
+        if not ((0.0 <= intersect_t) and (intersect_t < 1.0)): return 0
+        for i in range(3):
+            intersect[i] = v_pos[i] + intersect_t * v_dir[i]
+            cur_ind[i] = <int> floor((intersect[i] +
+                                      step[i]*1e-8*self.dds[i] -
+                                      self.left_edge[i])*self.idds[i])
+            tmax[i] = (((cur_ind[i]+step[i])*self.dds[i])+
+                        self.left_edge[i]-v_pos[i])*iv_dir[i]
+            # This deals with the asymmetry in having our indices refer to the
+            # left edge of a cell, but the right edge of the brick being one
+            # extra zone out.
+            if cur_ind[i] == self.dims[i] and step[i] < 0:
+                cur_ind[i] = self.dims[i] - 1
+            if cur_ind[i] < 0 or cur_ind[i] >= self.dims[i]: return 0
+            if step[i] > 0:
+                tmax[i] = (((cur_ind[i]+1)*self.dds[i])
+                            +self.left_edge[i]-v_pos[i])*iv_dir[i]
+            if step[i] < 0:
+                tmax[i] = (((cur_ind[i]+0)*self.dds[i])
+                            +self.left_edge[i]-v_pos[i])*iv_dir[i]
+            tdelta[i] = (self.dds[i]*iv_dir[i])
+            if tdelta[i] < 0: tdelta[i] *= -1
+        # We have to jumpstart our calculation
+        enter_t = intersect_t
+        hit = 0
+        while 1:
+            # dims here is one less than the dimensions of the data,
+            # but we are tracing on the grid, not on the data...
+            if (not (0 <= cur_ind[0] < self.dims[0])) or \
+               (not (0 <= cur_ind[1] < self.dims[1])) or \
+               (not (0 <= cur_ind[2] < self.dims[2])):
+                break
+            hit += 1
+            if tmax[0] < tmax[1]:
+                if tmax[0] < tmax[2]:
+                    exit_t = fmin(tmax[0], 1.0)
+                    self.sample_values(v_pos, v_dir, enter_t, exit_t, cur_ind,
+                                       rgba, tf)
+                    cur_ind[0] += step[0]
+                    enter_t = tmax[0]
+                    tmax[0] += tdelta[0]
+                else:
+                    exit_t = fmin(tmax[2], 1.0)
+                    self.sample_values(v_pos, v_dir, enter_t, exit_t, cur_ind,
+                                       rgba, tf)
+                    cur_ind[2] += step[2]
+                    enter_t = tmax[2]
+                    tmax[2] += tdelta[2]
+            else:
+                if tmax[1] < tmax[2]:
+                    exit_t = fmin(tmax[1], 1.0)
+                    self.sample_values(v_pos, v_dir, enter_t, exit_t, cur_ind,
+                                       rgba, tf)
+                    cur_ind[1] += step[1]
+                    enter_t = tmax[1]
+                    tmax[1] += tdelta[1]
+                else:
+                    exit_t = fmin(tmax[2], 1.0)
+                    self.sample_values(v_pos, v_dir, enter_t, exit_t, cur_ind,
+                                       rgba, tf)
+                    cur_ind[2] += step[2]
+                    enter_t = tmax[2]
+                    tmax[2] += tdelta[2]
+            if enter_t >= 1.0: break
+        if return_t != NULL: return_t[0] = exit_t
+        return hit
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    cdef void sample_values(self,
+                            np.float64_t v_pos[3],
+                            np.float64_t v_dir[3],
+                            np.float64_t enter_t,
+                            np.float64_t exit_t,
+                            int ci[3],
+                            np.float64_t *rgba,
+                            TransferFunctionProxy tf):
+        cdef np.float64_t cp[3], dp[3], pos[3], dt, t, dv
+        cdef np.float64_t grad[3], ds[3]
+        cdef np.float64_t local_dds[3], cell_left[3]
+        grad[0] = grad[1] = grad[2] = 0.0
+        cdef int dti, i
+        cdef kdtree_utils.kdres *ballq = NULL
+        dt = (exit_t - enter_t) / tf.ns # 4 samples should be dt=0.25
+        cdef int offset = ci[0] * (self.dims[1] + 1) * (self.dims[2] + 1) \
+                        + ci[1] * (self.dims[2] + 1) + ci[2]
+        # The initial and final values can be linearly interpolated between; so
+        # we just have to calculate our initial and final values.
+        cdef np.float64_t slopes[6]
+        for i in range(3):
+            dp[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
+            dp[i] -= ci[i] * self.dds[i] + self.left_edge[i]
+            dp[i] *= self.idds[i]
+            ds[i] = v_dir[i] * self.idds[i] * dt
+        for i in range(self.n_fields):
+            slopes[i] = offset_interpolate(self.dims, dp,
+                            self.data[i] + offset)
+        for i in range(3):
+            dp[i] += ds[i] * tf.ns
+        cdef np.float64_t temp
+        for i in range(self.n_fields):
+            temp = slopes[i]
+            slopes[i] -= offset_interpolate(self.dims, dp,
+                             self.data[i] + offset)
+            slopes[i] *= -1.0/tf.ns
+            self.dvs[i] = temp
+        if self.star_list != NULL:
+            for i in range(3):
+                cell_left[i] = ci[i] * self.dds[i] + self.left_edge[i]
+                # this gets us dp as the current first sample position
+                pos[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
+                dp[i] -= tf.ns * ds[i]
+                local_dds[i] = v_dir[i] * dt
+            ballq = kdtree_utils.kd_nearest_range3(
+                self.star_list, cell_left[0] + self.dds[0]*0.5,
+                                cell_left[1] + self.dds[1]*0.5,
+                                cell_left[2] + self.dds[2]*0.5,
+                                self.star_er + 0.9*self.dds[0])
+                                            # ~0.866 + a bit
+        for dti in range(tf.ns): 
+            #if (dv < tf.x_bounds[0]) or (dv > tf.x_bounds[1]):
+            #    continue
+            if self.star_list != NULL:
+                self.add_stars(ballq, dt, pos, rgba)
+                for i in range(3):
+                    dp[i] += ds[i]
+                    pos[i] += local_dds[i]
+            tf.eval_transfer(dt, self.dvs, rgba, grad)
+            for i in range(self.n_fields):
+                self.dvs[i] += slopes[i]
+        if ballq != NULL: kdtree_utils.kd_res_free(ballq)
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -1846,72 +2011,3 @@
 # From Enzo:
 #   dOmega = 4 pi r^2/Nrays
 #   if (dOmega > RaysPerCell * dx^2) then split
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-cdef void pg_sample_values(np.float64_t v_pos[3],
-                        np.float64_t v_dir[3],
-                        np.float64_t enter_t,
-                        np.float64_t exit_t,
-                        int ci[3],
-                        void *rdata):
-    cdef VolumeRendererData *dd = <VolumeRendererData*> rdata
-    cdef PartitionedGrid self = dd.pg
-    cdef np.float64_t cp[3], dp[3], pos[3], dt, t, dv
-    cdef np.float64_t grad[3], ds[3]
-    cdef np.float64_t local_dds[3], cell_left[3]
-    grad[0] = grad[1] = grad[2] = 0.0
-    cdef int dti, i
-    cdef kdtree_utils.kdres *ballq = NULL
-    dt = (exit_t - enter_t) / tf.ns # 4 samples should be dt=0.25
-    cdef int offset = ci[0] * (self.dims[1] + 1) * (self.dims[2] + 1) \
-                    + ci[1] * (self.dims[2] + 1) + ci[2]
-    # The initial and final values can be linearly interpolated between; so
-    # we just have to calculate our initial and final values.
-    cdef np.float64_t slopes[6]
-    for i in range(3):
-        dp[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
-        dp[i] -= ci[i] * self.dds[i] + self.left_edge[i]
-        dp[i] *= self.idds[i]
-        ds[i] = v_dir[i] * self.idds[i] * dt
-    for i in range(self.n_fields):
-        slopes[i] = offset_interpolate(self.dims, dp,
-                        self.data[i] + offset)
-        if tf.grad == i:
-            eval_gradient(self.dims, dp, self.data[i] + offset,
-                          grad)
-    for i in range(3):
-        dp[i] += ds[i] * tf.ns
-    cdef np.float64_t temp
-    for i in range(self.n_fields):
-        temp = slopes[i]
-        slopes[i] -= offset_interpolate(self.dims, dp,
-                         self.data[i] + offset)
-        slopes[i] *= -1.0/tf.ns
-        self.dvs[i] = temp
-    if self.star_list != NULL:
-        for i in range(3):
-            cell_left[i] = ci[i] * self.dds[i] + self.left_edge[i]
-            # this gets us dp as the current first sample position
-            pos[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
-            dp[i] -= tf.ns * ds[i]
-            local_dds[i] = v_dir[i] * dt
-        ballq = kdtree_utils.kd_nearest_range3(
-            self.star_list, cell_left[0] + self.dds[0]*0.5,
-                            cell_left[1] + self.dds[1]*0.5,
-                            cell_left[2] + self.dds[2]*0.5,
-                            self.star_er + 0.9*self.dds[0])
-                                        # ~0.866 + a bit
-    for dti in range(tf.ns): 
-        #if (dv < tf.x_bounds[0]) or (dv > tf.x_bounds[1]):
-        #    continue
-        if self.star_list != NULL:
-            self.add_stars(ballq, dt, pos, rgba)
-            for i in range(3):
-                dp[i] += ds[i]
-                pos[i] += local_dds[i]
-        tf.eval_transfer(dt, self.dvs, rgba, grad)
-        for i in range(self.n_fields):
-            self.dvs[i] += slopes[i]
-    if ballq != NULL: kdtree_utils.kd_res_free(ballq)
-
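The integrate_ray / sample_values pair folded back into PartitionedGrid is a standard Amanatides & Woo voxel traversal: find the t at which the ray enters the brick, then repeatedly step into the neighbouring cell along whichever axis has the smallest tmax, sampling the transfer function over each [enter_t, exit_t) interval. A much-reduced Python sketch of the stepping loop on a unit brick (assumes the ray starts inside the brick and has no zero direction components; not from the changeset):

    import numpy as np

    def walk(v_pos, v_dir, dims, sample):
        dds = 1.0 / np.asarray(dims, dtype='float64')        # cell widths of a [0,1]^3 brick
        v_pos, v_dir = np.asarray(v_pos), np.asarray(v_dir)
        step = np.where(v_dir < 0, -1, 1)
        cur = np.floor(v_pos / dds).astype('int64')
        iv_dir = 1.0 / v_dir
        tmax = ((cur + (step > 0)) * dds - v_pos) * iv_dir   # t of the next face crossing, per axis
        tdelta = np.abs(dds * iv_dir)                        # t needed to cross one full cell, per axis
        enter_t = 0.0
        while np.all((cur >= 0) & (cur < dims)):
            axis = int(np.argmin(tmax))                      # leave through the nearest face
            exit_t = min(tmax[axis], 1.0)
            sample(enter_t, exit_t, tuple(cur))              # integrate this cell's contribution
            cur[axis] += step[axis]
            enter_t = tmax[axis]
            tmax[axis] += tdelta[axis]
            if enter_t >= 1.0:
                break

    hits = []
    walk((0.1, 0.2, 0.3), (1.0, 0.7, 0.4), (8, 8, 8),
         lambda t0, t1, idx: hits.append(idx))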


diff -r bfcf3519e081e024b29b5b1806e6dbc6fcbd7805 -r 43fc7bb777fbdcbb681809ffe06a3c176534ea7f yt/utilities/_amr_utils/field_interpolation_tables.pxd
--- /dev/null
+++ b/yt/utilities/_amr_utils/field_interpolation_tables.pxd
@@ -0,0 +1,93 @@
+"""
+Field Interpolation Tables
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+cimport cython
+cimport numpy as np
+from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip
+
+cdef struct FieldInterpolationTable:
+    # Note that we make an assumption about retaining a reference to values
+    # externally.
+    np.float64_t *values 
+    np.float64_t bounds[2]
+    np.float64_t dbin
+    np.float64_t idbin
+    int field_id
+    int weight_field_id
+    int weight_table_id
+    int nbins
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef inline void FIT_initialize_table(FieldInterpolationTable *fit, int nbins,
+              np.float64_t *values, np.float64_t bounds1, np.float64_t bounds2,
+              int field_id, int weight_field_id, int weight_table_id) nogil:
+    fit.bounds[0] = bounds1; fit.bounds[1] = bounds2
+    fit.nbins = nbins
+    fit.dbin = (fit.bounds[1] - fit.bounds[0])/fit.nbins
+    fit.idbin = 1.0/fit.dbin
+    # Better not pull this out from under us, yo
+    fit.values = values
+    fit.field_id = field_id
+    fit.weight_field_id = weight_field_id
+    fit.weight_table_id = weight_table_id
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef inline np.float64_t FIT_get_value(FieldInterpolationTable *fit,
+                                       np.float64_t dvs[6]) nogil:
+    cdef np.float64_t bv, dy, dd, tf, rv
+    cdef int bin_id
+    if dvs[fit.field_id] > fit.bounds[1] or dvs[fit.field_id] < fit.bounds[0]: return 0.0
+    bin_id = <int> ((dvs[fit.field_id] - fit.bounds[0]) * fit.idbin)
+    dd = dvs[fit.field_id] - (fit.bounds[0] + bin_id * fit.dbin) # x - x0
+    bv = fit.values[bin_id]
+    dy = fit.values[bin_id + 1] - bv
+    if fit.weight_field_id != -1:
+        return dvs[fit.weight_field_id] * (bv + dd*dy*fit.idbin)
+    return (bv + dd*dy*fit.idbin)
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef inline void FIT_eval_transfer(np.float64_t dt, np.float64_t *dvs,
+                            np.float64_t *rgba, int n_fits,
+                            FieldInterpolationTable fits[6],
+                            int field_table_ids[6]) nogil:
+    cdef int i, fid, use
+    cdef np.float64_t ta, tf, istorage[6], trgba[6], dot_prod
+    for i in range(6): istorage[i] = 0.0
+    for i in range(n_fits):
+        istorage[i] = FIT_get_value(&fits[i], dvs)
+    for i in range(n_fits):
+        fid = fits[i].weight_table_id
+        if fid != -1: istorage[i] *= istorage[fid]
+    for i in range(6):
+        trgba[i] = istorage[field_table_ids[i]]
+    for i in range(3):
+        ta = fmax((1.0 - dt*trgba[i+3]), 0.0)
+        rgba[i] = dt*trgba[i] + ta * rgba[i]
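FIT_get_value is a plain piecewise-linear table lookup (value = y[bin] + (x - x0) * dy / dbin, zero outside the table bounds), and FIT_eval_transfer then composites the looked-up emission and alpha channels into the running rgba over a path length dt. Both pieces in NumPy terms (illustrative helpers, not from the changeset):

    import numpy as np

    def fit_get_value(values, bounds, x):
        # piecewise-linear lookup on a uniformly sampled table over [bounds[0], bounds[1]]
        if x < bounds[0] or x > bounds[1]:
            return 0.0
        nbins = len(values) - 1
        dbin = (bounds[1] - bounds[0]) / float(nbins)
        bin_id = min(int((x - bounds[0]) / dbin), nbins - 1)
        dd = x - (bounds[0] + bin_id * dbin)                 # x - x0
        return values[bin_id] + dd * (values[bin_id + 1] - values[bin_id]) / dbin

    def composite(dt, emission, alpha, rgba):
        # rgba <- dt * emission + max(1 - dt * alpha, 0) * rgba, channel by channel
        ta = np.maximum(1.0 - dt * np.asarray(alpha), 0.0)
        return dt * np.asarray(emission) + ta * np.asarray(rgba)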


diff -r bfcf3519e081e024b29b5b1806e6dbc6fcbd7805 -r 43fc7bb777fbdcbb681809ffe06a3c176534ea7f yt/utilities/_amr_utils/fp_utils.pxd
--- a/yt/utilities/_amr_utils/fp_utils.pxd
+++ b/yt/utilities/_amr_utils/fp_utils.pxd
@@ -26,28 +26,28 @@
 cimport numpy as np
 cimport cython
 
-cdef inline int imax(int i0, int i1):
+cdef inline int imax(int i0, int i1) nogil:
     if i0 > i1: return i0
     return i1
 
-cdef inline np.float64_t fmax(np.float64_t f0, np.float64_t f1):
+cdef inline np.float64_t fmax(np.float64_t f0, np.float64_t f1) nogil:
     if f0 > f1: return f0
     return f1
 
-cdef inline int imin(int i0, int i1):
+cdef inline int imin(int i0, int i1) nogil:
     if i0 < i1: return i0
     return i1
 
-cdef inline np.float64_t fmin(np.float64_t f0, np.float64_t f1):
+cdef inline np.float64_t fmin(np.float64_t f0, np.float64_t f1) nogil:
     if f0 < f1: return f0
     return f1
 
-cdef inline int iclip(int i, int a, int b):
+cdef inline int iclip(int i, int a, int b) nogil:
     if i < a: return a
     if i > b: return b
     return i
 
 cdef inline np.float64_t fclip(np.float64_t f,
-                      np.float64_t a, np.float64_t b):
+                      np.float64_t a, np.float64_t b) nogil:
     return fmin(fmax(f, a), b)
 


diff -r bfcf3519e081e024b29b5b1806e6dbc6fcbd7805 -r 43fc7bb777fbdcbb681809ffe06a3c176534ea7f yt/utilities/_amr_utils/grid_traversal.pyx
--- /dev/null
+++ b/yt/utilities/_amr_utils/grid_traversal.pyx
@@ -0,0 +1,648 @@
+"""
+Simple integrators for the radiative transfer equation
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2009 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+cimport numpy as np
+cimport cython
+cimport kdtree_utils
+cimport healpix_interface
+from stdlib cimport malloc, free, abs
+from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip
+from field_interpolation_tables cimport \
+    FieldInterpolationTable, FIT_initialize_table, FIT_eval_transfer
+
+from cython.parallel import prange, parallel, threadid
+
+cdef extern from "math.h":
+    double exp(double x) nogil
+    float expf(float x) nogil
+    long double expl(long double x) nogil
+    double floor(double x) nogil
+    double ceil(double x) nogil
+    double fmod(double x, double y) nogil
+    double log2(double x) nogil
+    long int lrint(double x) nogil
+    double fabs(double x) nogil
+
+cdef struct VolumeContainer
+ctypedef void sample_function(
+                VolumeContainer *vc,
+                np.float64_t v_pos[3],
+                np.float64_t v_dir[3],
+                np.float64_t enter_t,
+                np.float64_t exit_t,
+                int index[3],
+                void *data) nogil
+
+cdef extern from "FixedInterpolator.h":
+    np.float64_t fast_interpolate(int ds[3], int ci[3], np.float64_t dp[3],
+                                  np.float64_t *data) nogil
+    np.float64_t offset_interpolate(int ds[3], np.float64_t dp[3],
+                                    np.float64_t *data) nogil
+    np.float64_t trilinear_interpolate(int ds[3], int ci[3], np.float64_t dp[3],
+                                       np.float64_t *data) nogil
+    void eval_gradient(int ds[3], np.float64_t dp[3], np.float64_t *data,
+                       np.float64_t grad[3]) nogil
+    void offset_fill(int *ds, np.float64_t *data, np.float64_t *gridval) nogil
+    void vertex_interp(np.float64_t v1, np.float64_t v2, np.float64_t isovalue,
+                       np.float64_t vl[3], np.float64_t dds[3],
+                       np.float64_t x, np.float64_t y, np.float64_t z,
+                       int vind1, int vind2) nogil
+
+cdef struct VolumeContainer:
+    int n_fields
+    np.float64_t **data
+    np.float64_t left_edge[3]
+    np.float64_t right_edge[3]
+    np.float64_t dds[3]
+    np.float64_t idds[3]
+    int dims[3]
+
+cdef class PartitionedGrid:
+    cdef public object my_data
+    cdef public object LeftEdge
+    cdef public object RightEdge
+    cdef VolumeContainer *container
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    def __cinit__(self,
+                  int parent_grid_id, data,
+                  np.ndarray[np.float64_t, ndim=1] left_edge,
+                  np.ndarray[np.float64_t, ndim=1] right_edge,
+                  np.ndarray[np.int64_t, ndim=1] dims):
+        # The data is likely brought in via a slice, so we copy it
+        cdef np.ndarray[np.float64_t, ndim=3] tdata
+        self.LeftEdge = left_edge
+        self.RightEdge = right_edge
+        self.container = <VolumeContainer *> \
+            malloc(sizeof(VolumeContainer))
+        cdef VolumeContainer *c = self.container # convenience
+        cdef int n_fields = len(data)
+        c.n_fields = n_fields
+        for i in range(3):
+            c.left_edge[i] = left_edge[i]
+            c.right_edge[i] = right_edge[i]
+            c.dims[i] = dims[i]
+            c.dds[i] = (c.right_edge[i] - c.left_edge[i])/dims[i]
+            c.idds[i] = 1.0/c.dds[i]
+        self.my_data = data
+        c.data = <np.float64_t **> malloc(sizeof(np.float64_t*) * n_fields)
+        for i in range(n_fields):
+            tdata = data[i]
+            c.data[i] = <np.float64_t *> tdata.data
+
+    def __dealloc__(self):
+        # The data fields are not owned by the container, they are owned by us!
+        # So we don't need to deallocate them.
+        free(self.container.data)
+        free(self.container)
+
+cdef struct ImageContainer:
+    np.float64_t *vp_pos, *vp_dir, *center, *image,
+    np.float64_t pdx, pdy, bounds[4]
+    int nv[2]
+    int vp_strides[3]
+    int im_strides[3]
+    int vd_strides[3]
+    np.float64_t *x_vec, *y_vec
+
+cdef struct ImageAccumulator:
+    np.float64_t rgba[3]
+    void *supp_data
+
+cdef class ImageSampler:
+    cdef ImageContainer *image
+    cdef sample_function *sampler
+    cdef public object avp_pos, avp_dir, acenter, aimage, ax_vec, ay_vec
+    cdef void *supp_data
+    cdef np.float64_t width[3]
+    def __init__(self, 
+                  np.ndarray vp_pos,
+                  np.ndarray vp_dir,
+                  np.ndarray[np.float64_t, ndim=1] center,
+                  bounds,
+                  np.ndarray[np.float64_t, ndim=3] image,
+                  np.ndarray[np.float64_t, ndim=1] x_vec,
+                  np.ndarray[np.float64_t, ndim=1] y_vec,
+                  np.ndarray[np.float64_t, ndim=1] width,
+                  *args, **kwargs):
+        self.image = <ImageContainer *> malloc(sizeof(ImageContainer))
+        cdef ImageContainer *imagec = self.image
+        self.sampler = NULL
+        cdef int i, j
+        # These assignments are so we can track the objects and prevent their
+        # de-allocation from reference counts.
+        self.avp_pos = vp_pos
+        self.avp_dir = vp_dir
+        self.acenter = center
+        self.aimage = image
+        self.ax_vec = x_vec
+        self.ay_vec = y_vec
+        imagec.vp_pos = <np.float64_t *> vp_pos.data
+        imagec.vp_dir = <np.float64_t *> vp_dir.data
+        imagec.center = <np.float64_t *> center.data
+        imagec.image = <np.float64_t *> image.data
+        imagec.x_vec = <np.float64_t *> x_vec.data
+        imagec.y_vec = <np.float64_t *> y_vec.data
+        imagec.nv[0] = image.shape[0]
+        imagec.nv[1] = image.shape[1]
+        for i in range(4): imagec.bounds[i] = bounds[i]
+        imagec.pdx = (bounds[1] - bounds[0])/imagec.nv[0]
+        imagec.pdy = (bounds[3] - bounds[2])/imagec.nv[1]
+        for i in range(3):
+            imagec.vp_strides[i] = vp_pos.strides[i] / 8
+            imagec.im_strides[i] = image.strides[i] / 8
+            self.width[i] = width[i]
+        if vp_dir.ndim > 1:
+            for i in range(3):
+                imagec.vd_strides[i] = vp_dir.strides[i] / 8
+        elif vp_pos.ndim == 1:
+            imagec.vd_strides[0] = imagec.vd_strides[1] = imagec.vd_strides[2] = -1
+        else:
+            raise RuntimeError
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef void get_start_stop(self, np.float64_t *ex, int *rv):
+        # Extrema need to be re-centered
+        cdef np.float64_t cx, cy
+        cdef ImageContainer *im = self.image
+        cdef int i
+        cx = cy = 0.0
+        for i in range(3):
+            cx += im.center[i] * im.x_vec[i]
+            cy += im.center[i] * im.y_vec[i]
+        rv[0] = lrint((ex[0] - cx - im.bounds[0])/im.pdx)
+        rv[1] = rv[0] + lrint((ex[1] - ex[0])/im.pdx)
+        rv[2] = lrint((ex[2] - cy - im.bounds[2])/im.pdy)
+        rv[3] = rv[2] + lrint((ex[3] - ex[2])/im.pdy)
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    cdef void calculate_extent(self, np.float64_t extrema[4],
+                               VolumeContainer *vc) nogil:
+        # We do this for all eight corners
+        cdef np.float64_t *edges[2], temp
+        edges[0] = vc.left_edge
+        edges[1] = vc.right_edge
+        extrema[0] = extrema[2] = 1e300; extrema[1] = extrema[3] = -1e300
+        cdef int i, j, k
+        for i in range(2):
+            for j in range(2):
+                for k in range(2):
+                    # This should rotate it into the vector plane
+                    temp  = edges[i][0] * self.image.x_vec[0]
+                    temp += edges[j][1] * self.image.x_vec[1]
+                    temp += edges[k][2] * self.image.x_vec[2]
+                    if temp < extrema[0]: extrema[0] = temp
+                    if temp > extrema[1]: extrema[1] = temp
+                    temp  = edges[i][0] * self.image.y_vec[0]
+                    temp += edges[j][1] * self.image.y_vec[1]
+                    temp += edges[k][2] * self.image.y_vec[2]
+                    if temp < extrema[2]: extrema[2] = temp
+                    if temp > extrema[3]: extrema[3] = temp
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def __call__(self, PartitionedGrid pg):
+        # This routine will iterate over all of the vectors and cast each in
+        # turn.  Might benefit from a more sophisticated intersection check,
+        # like http://courses.csusm.edu/cs697exz/ray_box.htm
+        cdef int vi, vj, hit, i, j, ni, nj, nn, offset
+        cdef int iter[4]
+        cdef VolumeContainer *vc = pg.container
+        cdef ImageContainer *im = self.image
+        self.setup(pg)
+        if self.sampler == NULL: raise RuntimeError
+        cdef np.float64_t *v_pos, *v_dir, rgba[6], extrema[4]
+        hit = 0
+        self.calculate_extent(extrema, vc)
+        self.get_start_stop(extrema, iter)
+        iter[0] = iclip(iter[0]-1, 0, im.nv[0]-1)
+        iter[1] = iclip(iter[1]+1, 0, im.nv[0]-1)
+        iter[2] = iclip(iter[2]-1, 0, im.nv[1]-1)
+        iter[3] = iclip(iter[3]+1, 0, im.nv[1]-1)
+        cdef ImageAccumulator *idata
+        cdef void *data
+        cdef int nx = (iter[1] - iter[0])
+        cdef int ny = (iter[3] - iter[2])
+        cdef int size = nx * ny
+        cdef np.float64_t px, py 
+        cdef np.float64_t width[3] 
+        for i in range(3):
+            width[i] = self.width[i]
+        #print iter[0], iter[1], iter[2], iter[3], width[0], width[1], width[2]
+        with nogil, parallel():
+            idata = <ImageAccumulator *> malloc(sizeof(ImageAccumulator))
+            idata.supp_data = self.supp_data
+            v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+            if im.vd_strides[0] == -1:
+                for j in prange(size, schedule="dynamic"):
+                    vj = j % ny
+                    vi = (j - vj) / ny + iter[0]
+                    vj = vj + iter[2]
+                    # Dynamically calculate the position
+                    px = width[0] * (<float>vi)/(<float>im.nv[0]-1) - width[0]/2.0
+                    py = width[1] * (<float>vj)/(<float>im.nv[1]-1) - width[1]/2.0
+                    v_pos[0] = im.vp_pos[0]*px + im.vp_pos[3]*py + im.vp_pos[9]
+                    v_pos[1] = im.vp_pos[1]*px + im.vp_pos[4]*py + im.vp_pos[10]
+                    v_pos[2] = im.vp_pos[2]*px + im.vp_pos[5]*py + im.vp_pos[11]
+                    offset = im.im_strides[0] * vi + im.im_strides[1] * vj
+                    for i in range(3): idata.rgba[i] = im.image[i + offset]
+                    walk_volume(vc, v_pos, im.vp_dir, self.sampler,
+                                (<void *> idata))
+                    for i in range(3): im.image[i + offset] = idata.rgba[i]
+            else:
+                # If we do not have a simple image plane, we have to cast all
+                # our rays 
+                v_dir = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+                for j in prange(size, schedule="dynamic"):
+                    vj = j % ny
+                    vi = (j - vj) / ny + iter[0]
+                    vj = vj + iter[2]
+                    offset = im.vp_strides[0] * vi + im.vp_strides[1] * vj
+                    for i in range(3): v_pos[i] = im.vp_dir[i + offset]
+                    offset = im.im_strides[0] * vi + im.im_strides[1] * vj
+                    for i in range(3): idata.rgba[i] = im.image[i + offset]
+                    offset = im.vd_strides[0] * vi + im.vd_strides[1] * vj
+                    for i in range(3): v_dir[i] = im.vp_dir[i + offset]
+                    walk_volume(vc, v_pos, v_dir, self.sampler, data)
+                free(v_dir)
+            free(idata)
+            free(v_pos)
+        return hit
+
+cdef void projection_sampler(
+                 VolumeContainer *vc, 
+                 np.float64_t v_pos[3],
+                 np.float64_t v_dir[3],
+                 np.float64_t enter_t,
+                 np.float64_t exit_t,
+                 int index[3],
+                 void *data) nogil:
+    cdef ImageAccumulator *im = <ImageAccumulator *> data
+    cdef int i
+    cdef np.float64_t dl = (exit_t - enter_t)
+    # We need this because by default it assumes vertex-centered data.
+    for i in range(3):
+        if index[i] < 0 or index[i] >= vc.dims[i]: return
+    cdef int di = (index[0]*(vc.dims[1])+index[1])*vc.dims[2]+index[2]
+    for i in range(imin(3, vc.n_fields)):
+        im.rgba[i] += vc.data[i][di] * dl
+
+cdef class ProjectionSampler(ImageSampler):
+    def setup(self, PartitionedGrid pg):
+        self.sampler = projection_sampler
+
+cdef struct VolumeRenderAccumulator:
+    int n_fits
+    int n_samples
+    FieldInterpolationTable *fits
+    int field_table_ids[6]
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef void volume_render_sampler(
+                 VolumeContainer *vc, 
+                 np.float64_t v_pos[3],
+                 np.float64_t v_dir[3],
+                 np.float64_t enter_t,
+                 np.float64_t exit_t,
+                 int index[3],
+                 void *data) nogil:
+    cdef ImageAccumulator *im = <ImageAccumulator *> data
+    cdef VolumeRenderAccumulator *vri = <VolumeRenderAccumulator *> \
+            im.supp_data
+    # we assume this has vertex-centered data.
+    cdef int offset = index[0] * (vc.dims[1] + 1) * (vc.dims[2] + 1) \
+                    + index[1] * (vc.dims[2] + 1) + index[2]
+    cdef np.float64_t slopes[6], dp[3], ds[3]
+    cdef np.float64_t dt = (exit_t - enter_t) / vri.n_samples
+    cdef np.float64_t dvs[6]
+    for i in range(3):
+        dp[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
+        dp[i] -= index[i] * vc.dds[i] + vc.left_edge[i]
+        dp[i] *= vc.idds[i]
+        ds[i] = v_dir[i] * vc.idds[i] * dt
+    for i in range(vc.n_fields):
+        slopes[i] = offset_interpolate(vc.dims, dp,
+                        vc.data[i] + offset)
+    for i in range(3):
+        dp[i] += ds[i] * vri.n_samples
+    cdef np.float64_t temp
+    for i in range(vc.n_fields):
+        temp = slopes[i]
+        slopes[i] -= offset_interpolate(vc.dims, dp,
+                         vc.data[i] + offset)
+        slopes[i] *= -1.0/vri.n_samples
+        dvs[i] = temp
+    for dti in range(vri.n_samples): 
+        FIT_eval_transfer(dt, dvs, im.rgba, vri.n_fits, vri.fits,
+                          vri.field_table_ids)
+        for i in range(vc.n_fields):
+            dvs[i] += slopes[i]
+
+cdef class VolumeRenderSampler(ImageSampler):
+    cdef VolumeRenderAccumulator *vra
+    cdef public object tf_obj
+    cdef public object my_field_tables
+    def __cinit__(self, 
+                  np.ndarray vp_pos,
+                  np.ndarray vp_dir,
+                  np.ndarray[np.float64_t, ndim=1] center,
+                  bounds,
+                  np.ndarray[np.float64_t, ndim=3] image,
+                  np.ndarray[np.float64_t, ndim=1] x_vec,
+                  np.ndarray[np.float64_t, ndim=1] y_vec,
+                  np.ndarray[np.float64_t, ndim=1] width,
+                  tf_obj, n_samples = 10):
+        ImageSampler.__init__(self, vp_pos, vp_dir, center, bounds, image,
+                               x_vec, y_vec, width)
+        cdef int i
+        cdef np.ndarray[np.float64_t, ndim=1] temp
+        # Now we handle tf_obj
+        self.vra = <VolumeRenderAccumulator *> \
+            malloc(sizeof(VolumeRenderAccumulator))
+        self.vra.fits = <FieldInterpolationTable *> \
+            malloc(sizeof(FieldInterpolationTable) * 6)
+        self.vra.n_fits = tf_obj.n_field_tables
+        assert(self.vra.n_fits <= 6)
+        self.vra.n_samples = n_samples
+        self.my_field_tables = []
+        for i in range(self.vra.n_fits):
+            temp = tf_obj.tables[i].y
+            FIT_initialize_table(&self.vra.fits[i],
+                      temp.shape[0],
+                      <np.float64_t *> temp.data,
+                      tf_obj.tables[i].x_bounds[0],
+                      tf_obj.tables[i].x_bounds[1],
+                      tf_obj.field_ids[i], tf_obj.weight_field_ids[i],
+                      tf_obj.weight_table_ids[i])
+            self.my_field_tables.append((tf_obj.tables[i],
+                                         tf_obj.tables[i].y))
+        for i in range(6):
+            self.vra.field_table_ids[i] = tf_obj.field_table_ids[i]
+        self.supp_data = <void *> self.vra
+
+    def setup(self, PartitionedGrid pg):
+        self.sampler = volume_render_sampler
+
+    def __dealloc__(self):
+        return
+        free(self.vra.fits)
+        free(self.vra)
+
+cdef class GridFace:
+    cdef int direction
+    cdef public np.float64_t coord
+    cdef np.float64_t left_edge[3]
+    cdef np.float64_t right_edge[3]
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    def __init__(self, grid, int direction, int left):
+        self.direction = direction
+        if left == 1:
+            self.coord = grid.LeftEdge[direction]
+        else:
+            self.coord = grid.RightEdge[direction]
+        cdef int i
+        for i in range(3):
+            self.left_edge[i] = grid.LeftEdge[i]
+            self.right_edge[i] = grid.RightEdge[i]
+        self.left_edge[direction] = self.right_edge[direction] = self.coord
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    cdef int proj_overlap(self, np.float64_t *left_edge, np.float64_t *right_edge):
+        cdef int xax, yax
+        xax = (self.direction + 1) % 3
+        yax = (self.direction + 2) % 3
+        if left_edge[xax] >= self.right_edge[xax]: return 0
+        if right_edge[xax] <= self.left_edge[xax]: return 0
+        if left_edge[yax] >= self.right_edge[yax]: return 0
+        if right_edge[yax] <= self.left_edge[yax]: return 0
+        return 1
+
+cdef class ProtoPrism:
+    cdef np.float64_t left_edge[3]
+    cdef np.float64_t right_edge[3]
+    cdef public object LeftEdge
+    cdef public object RightEdge
+    cdef public object subgrid_faces
+    cdef public int parent_grid_id
+    def __cinit__(self, int parent_grid_id,
+                  np.ndarray[np.float64_t, ndim=1] left_edge,
+                  np.ndarray[np.float64_t, ndim=1] right_edge,
+                  subgrid_faces):
+        self.parent_grid_id = parent_grid_id
+        cdef int i
+        self.LeftEdge = left_edge
+        self.RightEdge = right_edge
+        for i in range(3):
+            self.left_edge[i] = left_edge[i]
+            self.right_edge[i] = right_edge[i]
+        self.subgrid_faces = subgrid_faces
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    def sweep(self, int direction = 0, int stack = 0):
+        cdef int i
+        cdef GridFace face
+        cdef np.float64_t proto_split[3]
+        for i in range(3): proto_split[i] = self.right_edge[i]
+        for face in self.subgrid_faces[direction]:
+            proto_split[direction] = face.coord
+            if proto_split[direction] <= self.left_edge[direction]:
+                continue
+            if proto_split[direction] == self.right_edge[direction]:
+                if stack == 2: return [self]
+                return self.sweep((direction + 1) % 3, stack + 1)
+            if face.proj_overlap(self.left_edge, proto_split) == 1:
+                left, right = self.split(proto_split, direction)
+                LC = left.sweep((direction + 1) % 3)
+                RC = right.sweep(direction)
+                return LC + RC
+        raise RuntimeError
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    cdef object split(self, np.float64_t *sp, int direction):
+        cdef int i
+        cdef np.ndarray split_left = self.LeftEdge.copy()
+        cdef np.ndarray split_right = self.RightEdge.copy()
+
+        for i in range(3): split_left[i] = self.right_edge[i]
+        split_left[direction] = sp[direction]
+        left = ProtoPrism(self.parent_grid_id, self.LeftEdge, split_left,
+                          self.subgrid_faces)
+
+        for i in range(3): split_right[i] = self.left_edge[i]
+        split_right[direction] = sp[direction]
+        right = ProtoPrism(self.parent_grid_id, split_right, self.RightEdge,
+                           self.subgrid_faces)
+
+        return (left, right)
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    def get_brick(self, np.ndarray[np.float64_t, ndim=1] grid_left_edge,
+                        np.ndarray[np.float64_t, ndim=1] grid_dds,
+                        child_mask):
+        # We get passed in the left edge, the dds (which gives dimensions) and
+        # the data, which is already vertex-centered.
+        cdef PartitionedGrid PG
+        cdef int li[3], ri[3], idims[3], i
+        for i in range(3):
+            li[i] = lrint((self.left_edge[i] - grid_left_edge[i])/grid_dds[i])
+            ri[i] = lrint((self.right_edge[i] - grid_left_edge[i])/grid_dds[i])
+            idims[i] = ri[i] - li[i]
+        if child_mask[li[0], li[1], li[2]] == 0: return []
+        cdef np.ndarray[np.int64_t, ndim=1] dims = np.empty(3, dtype='int64')
+        for i in range(3):
+            dims[i] = idims[i]
+        #cdef np.ndarray[np.float64_t, ndim=3] new_data
+        #new_data = data[li[0]:ri[0]+1,li[1]:ri[1]+1,li[2]:ri[2]+1].copy()
+        #PG = PartitionedGrid(self.parent_grid_id, new_data,
+        #                     self.LeftEdge, self.RightEdge, dims)
+        return ((li[0], ri[0]), (li[1], ri[1]), (li[2], ri[2]), dims)
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef int walk_volume(VolumeContainer *vc,
+                     np.float64_t v_pos[3],
+                     np.float64_t v_dir[3],
+                     sample_function *sampler,
+                     void *data,
+                     np.float64_t *return_t = NULL,
+                     np.float64_t enter_t = -1.0) nogil:
+    cdef int cur_ind[3], step[3], x, y, i, n, flat_ind, hit, direction
+    cdef np.float64_t intersect_t = 1.0
+    cdef np.float64_t iv_dir[3]
+    cdef np.float64_t intersect[3], tmax[3], tdelta[3]
+    cdef np.float64_t dist, alpha, dt, exit_t
+    cdef np.float64_t tr, tl, temp_x, temp_y, dv
+    for i in range(3):
+        if (v_dir[i] < 0):
+            step[i] = -1
+        elif (v_dir[i] == 0):
+            step[i] = 1
+            tmax[i] = 1e60
+            iv_dir[i] = 1e60
+            tdelta[i] = 1e-60
+            continue
+        else:
+            step[i] = 1
+        x = (i+1) % 3
+        y = (i+2) % 3
+        iv_dir[i] = 1.0/v_dir[i]
+        tl = (vc.left_edge[i] - v_pos[i])*iv_dir[i]
+        temp_x = (v_pos[x] + tl*v_dir[x])
+        temp_y = (v_pos[y] + tl*v_dir[y])
+        if vc.left_edge[x] <= temp_x and temp_x <= vc.right_edge[x] and \
+           vc.left_edge[y] <= temp_y and temp_y <= vc.right_edge[y] and \
+           0.0 <= tl and tl < intersect_t:
+            direction = i
+            intersect_t = tl
+        tr = (vc.right_edge[i] - v_pos[i])*iv_dir[i]
+        temp_x = (v_pos[x] + tr*v_dir[x])
+        temp_y = (v_pos[y] + tr*v_dir[y])
+        if vc.left_edge[x] <= temp_x and temp_x <= vc.right_edge[x] and \
+           vc.left_edge[y] <= temp_y and temp_y <= vc.right_edge[y] and \
+           0.0 <= tr and tr < intersect_t:
+            direction = i
+            intersect_t = tr
+    if vc.left_edge[0] <= v_pos[0] and v_pos[0] <= vc.right_edge[0] and \
+       vc.left_edge[1] <= v_pos[1] and v_pos[1] <= vc.right_edge[1] and \
+       vc.left_edge[2] <= v_pos[2] and v_pos[2] <= vc.right_edge[2]:
+        intersect_t = 0.0
+    if enter_t >= 0.0: intersect_t = enter_t
+    if not ((0.0 <= intersect_t) and (intersect_t < 1.0)): return 0
+    for i in range(3):
+        intersect[i] = v_pos[i] + intersect_t * v_dir[i]
+        cur_ind[i] = <int> floor((intersect[i] +
+                                  step[i]*1e-8*vc.dds[i] -
+                                  vc.left_edge[i])*vc.idds[i])
+        tmax[i] = (((cur_ind[i]+step[i])*vc.dds[i])+
+                    vc.left_edge[i]-v_pos[i])*iv_dir[i]
+        # This deals with the asymmetry in having our indices refer to the
+        # left edge of a cell, but the right edge of the brick being one
+        # extra zone out.
+        if cur_ind[i] == vc.dims[i] and step[i] < 0:
+            cur_ind[i] = vc.dims[i] - 1
+        if cur_ind[i] < 0 or cur_ind[i] >= vc.dims[i]: return 0
+        if step[i] > 0:
+            tmax[i] = (((cur_ind[i]+1)*vc.dds[i])
+                        +vc.left_edge[i]-v_pos[i])*iv_dir[i]
+        if step[i] < 0:
+            tmax[i] = (((cur_ind[i]+0)*vc.dds[i])
+                        +vc.left_edge[i]-v_pos[i])*iv_dir[i]
+        tdelta[i] = (vc.dds[i]*iv_dir[i])
+        if tdelta[i] < 0: tdelta[i] *= -1
+    # We have to jumpstart our calculation
+    enter_t = intersect_t
+    hit = 0
+    while 1:
+        # dims here is one less than the dimensions of the data,
+        # but we are tracing on the grid, not on the data...
+        if (not (0 <= cur_ind[0] < vc.dims[0])) or \
+           (not (0 <= cur_ind[1] < vc.dims[1])) or \
+           (not (0 <= cur_ind[2] < vc.dims[2])):
+            break
+        hit += 1
+        if tmax[0] < tmax[1]:
+            if tmax[0] < tmax[2]:
+                exit_t = fmin(tmax[0], 1.0)
+                sampler(vc, v_pos, v_dir, enter_t, exit_t, cur_ind, data)
+                cur_ind[0] += step[0]
+                enter_t = tmax[0]
+                tmax[0] += tdelta[0]
+            else:
+                exit_t = fmin(tmax[2], 1.0)
+                sampler(vc, v_pos, v_dir, enter_t, exit_t, cur_ind, data)
+                cur_ind[2] += step[2]
+                enter_t = tmax[2]
+                tmax[2] += tdelta[2]
+        else:
+            if tmax[1] < tmax[2]:
+                exit_t = fmin(tmax[1], 1.0)
+                sampler(vc, v_pos, v_dir, enter_t, exit_t, cur_ind, data)
+                cur_ind[1] += step[1]
+                enter_t = tmax[1]
+                tmax[1] += tdelta[1]
+            else:
+                exit_t = fmin(tmax[2], 1.0)
+                sampler(vc, v_pos, v_dir, enter_t, exit_t, cur_ind, data)
+                cur_ind[2] += step[2]
+                enter_t = tmax[2]
+                tmax[2] += tdelta[2]
+        if enter_t >= 1.0: break
+    if return_t != NULL: return_t[0] = exit_t
+    return hit
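grid_traversal.pyx reorganizes the renderer around plain C structs plus a sampler callback: walk_volume performs the same cell stepping as integrate_ray above, but it is nogil and takes a sample_function pointer, so ProjectionSampler (accumulate field value times path length) and VolumeRenderSampler (evaluate the transfer function over sub-samples) share one traversal and can be driven from prange. The callback shape, in Python terms (names and the per-cell container are illustrative, not from the changeset):

    def projection_sampler(vc, enter_t, exit_t, index, rgba):
        # called once per cell the ray crosses; dl is the path length inside the cell
        # vc stands in for the per-cell field values, e.g. a dict keyed by (i, j, k)
        dl = exit_t - enter_t
        for i in range(len(rgba)):
            rgba[i] += vc[index][i] * dl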


diff -r bfcf3519e081e024b29b5b1806e6dbc6fcbd7805 -r 43fc7bb777fbdcbb681809ffe06a3c176534ea7f yt/utilities/_amr_utils/perftools_wrap.pyx
--- /dev/null
+++ b/yt/utilities/_amr_utils/perftools_wrap.pyx
@@ -0,0 +1,39 @@
+"""
+Turn on and off perftools profiling
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+# For more info:
+# https://pygabriel.wordpress.com/2010/04/14/profiling-python-c-extensions/
+
+# prof.pyx
+cdef extern from "google/profiler.h":
+    void ProfilerStart( char* fname )
+    void ProfilerStop()
+
+def profiler_start(fname):
+    ProfilerStart(<char *>fname)
+
+def profiler_stop():
+    ProfilerStop()
+
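perftools_wrap.pyx simply exposes ProfilerStart/ProfilerStop from google-perftools so that a hot section can be bracketed from Python and the profile dump inspected with pprof afterwards. Intended use looks roughly like this (the import path, output name, and the profiled call are assumptions, not from the changeset):

    from yt.utilities.amr_utils import profiler_start, profiler_stop   # assumed build target

    profiler_start("render.prof")
    run_expensive_render()            # hypothetical hot section
    profiler_stop()
    # afterwards:  pprof --text `which python` render.prof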


diff -r bfcf3519e081e024b29b5b1806e6dbc6fcbd7805 -r 43fc7bb777fbdcbb681809ffe06a3c176534ea7f yt/utilities/_amr_utils/ray_handling.pxd
--- a/yt/utilities/_amr_utils/ray_handling.pxd
+++ /dev/null
@@ -1,153 +0,0 @@
-"""
-General purpose ray casting
-
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: KIPAC/SLAC/Stanford
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2009 Matthew Turk.  All Rights Reserved.
-
-  This file is part of yt.
-
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
-
-
-cimport numpy as np
-from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip
-
-ctypedef void (*ray_sampler) (np.float64_t v_pos[3],
-                              np.float64_t v_dir[3],
-                              np.float64_t enter_t,
-                              np.float64_t exit_t,
-                              int ci[3],
-                              void *rdata)
-
-@cython.cdivision(True)
-@cython.boundscheck(False)
-@cython.wraparound(False)
-cdef int integrate_ray(np.float64_t left_edge[3],
-                       np.float64_t right_edge[3],
-                       np.float64_t dds[3],
-                       np.float64_t idds[3],
-                       int dims[3],
-                       np.float64_t v_pos[3],
-                       np.float64_t v_dir[3],
-                       np.float64_t *return_t,
-                       np.float64_t enter_t,
-                       void *rdata):
-    cdef int cur_ind[3], step[3], x, y, i, n, flat_ind, hit, direction
-    cdef np.float64_t intersect_t = 1.0
-    cdef np.float64_t iv_dir[3]
-    cdef np.float64_t intersect[3], tmax[3], tdelta[3]
-    cdef np.float64_t dist, alpha, dt, exit_t
-    cdef np.float64_t tr, tl, temp_x, temp_y, dv
-    for i in range(3):
-        if (v_dir[i] < 0):
-            step[i] = -1
-        elif (v_dir[i] == 0):
-            step[i] = 1
-            tmax[i] = 1e60
-            iv_dir[i] = 1e60
-            tdelta[i] = 1e-60
-            continue
-        else:
-            step[i] = 1
-        x = (i+1) % 3
-        y = (i+2) % 3
-        iv_dir[i] = 1.0/v_dir[i]
-        tl = (left_edge[i] - v_pos[i])*iv_dir[i]
-        temp_x = (v_pos[x] + tl*v_dir[x])
-        temp_y = (v_pos[y] + tl*v_dir[y])
-        if left_edge[x] <= temp_x and temp_x <= right_edge[x] and \
-           left_edge[y] <= temp_y and temp_y <= right_edge[y] and \
-           0.0 <= tl and tl < intersect_t:
-            direction = i
-            intersect_t = tl
-        tr = (right_edge[i] - v_pos[i])*iv_dir[i]
-        temp_x = (v_pos[x] + tr*v_dir[x])
-        temp_y = (v_pos[y] + tr*v_dir[y])
-        if left_edge[x] <= temp_x and temp_x <= right_edge[x] and \
-           left_edge[y] <= temp_y and temp_y <= right_edge[y] and \
-           0.0 <= tr and tr < intersect_t:
-            direction = i
-            intersect_t = tr
-    if left_edge[0] <= v_pos[0] and v_pos[0] <= right_edge[0] and \
-       left_edge[1] <= v_pos[1] and v_pos[1] <= right_edge[1] and \
-       left_edge[2] <= v_pos[2] and v_pos[2] <= right_edge[2]:
-        intersect_t = 0.0
-    if enter_t >= 0.0: intersect_t = enter_t
-    if not ((0.0 <= intersect_t) and (intersect_t < 1.0)): return 0
-    for i in range(3):
-        intersect[i] = v_pos[i] + intersect_t * v_dir[i]
-        cur_ind[i] = <int> floor((intersect[i] +
-                                  step[i]*1e-8*dds[i] -
-                                  left_edge[i])*idds[i])
-        tmax[i] = (((cur_ind[i]+step[i])*dds[i])+
-                    left_edge[i]-v_pos[i])*iv_dir[i]
-        # This deals with the asymmetry in having our indices refer to the
-        # left edge of a cell, but the right edge of the brick being one
-        # extra zone out.
-        if cur_ind[i] == dims[i] and step[i] < 0:
-            cur_ind[i] = dims[i] - 1
-        if cur_ind[i] < 0 or cur_ind[i] >= dims[i]: return 0
-        if step[i] > 0:
-            tmax[i] = (((cur_ind[i]+1)*dds[i])
-                        +left_edge[i]-v_pos[i])*iv_dir[i]
-        if step[i] < 0:
-            tmax[i] = (((cur_ind[i]+0)*dds[i])
-                        +left_edge[i]-v_pos[i])*iv_dir[i]
-        tdelta[i] = (dds[i]*iv_dir[i])
-        if tdelta[i] < 0: tdelta[i] *= -1
-    # We have to jumpstart our calculation
-    enter_t = intersect_t
-    hit = 0
-    while 1:
-        # dims here is one less than the dimensions of the data,
-        # but we are tracing on the grid, not on the data...
-        if (not (0 <= cur_ind[0] < dims[0])) or \
-           (not (0 <= cur_ind[1] < dims[1])) or \
-           (not (0 <= cur_ind[2] < dims[2])):
-            break
-        hit += 1
-        if tmax[0] < tmax[1]:
-            if tmax[0] < tmax[2]:
-                exit_t = fmin(tmax[0], 1.0)
-                sample_values(v_pos, v_dir, enter_t, exit_t, cur_ind, rdata)
-                cur_ind[0] += step[0]
-                enter_t = tmax[0]
-                tmax[0] += tdelta[0]
-            else:
-                exit_t = fmin(tmax[2], 1.0)
-                sample_values(v_pos, v_dir, enter_t, exit_t, cur_ind, rdata)
-                cur_ind[2] += step[2]
-                enter_t = tmax[2]
-                tmax[2] += tdelta[2]
-        else:
-            if tmax[1] < tmax[2]:
-                exit_t = fmin(tmax[1], 1.0)
-                sample_values(v_pos, v_dir, enter_t, exit_t, cur_ind, rdata)
-                cur_ind[1] += step[1]
-                enter_t = tmax[1]
-                tmax[1] += tdelta[1]
-            else:
-                exit_t = fmin(tmax[2], 1.0)
-                sample_values(v_pos, v_dir, enter_t, exit_t, cur_ind, rdata)
-                cur_ind[2] += step[2]
-                enter_t = tmax[2]
-                tmax[2] += tdelta[2]
-        if enter_t >= 1.0: break
-    if return_t != NULL: return_t[0] = exit_t
-    return hit
-


diff -r bfcf3519e081e024b29b5b1806e6dbc6fcbd7805 -r 43fc7bb777fbdcbb681809ffe06a3c176534ea7f yt/utilities/_amr_utils/setup.py
--- a/yt/utilities/_amr_utils/setup.py
+++ b/yt/utilities/_amr_utils/setup.py
@@ -173,5 +173,28 @@
                           "yt/utilities/_amr_utils/healpix_pix2vec_nest.c",
                           "yt/utilities/_amr_utils/healpix_vec2pix_nest.c"]
           )
+    config.add_extension("grid_traversal", 
+               ["yt/utilities/_amr_utils/grid_traversal.pyx",
+                "yt/utilities/_amr_utils/FixedInterpolator.c"],
+               include_dirs=["yt/utilities/_amr_utils/"],
+               libraries=["m"], 
+               extra_compile_args=['-fopenmp'],
+               extra_link_args=['-fopenmp'],
+               depends = ["yt/utilities/_amr_utils/VolumeIntegrator.pyx",
+                          "yt/utilities/_amr_utils/fp_utils.pxd",
+                          "yt/utilities/_amr_utils/FixedInterpolator.h",
+                          ]
+          )
+    if os.environ.get("GPERFTOOLS", "no").upper() != "NO":
+        gpd = os.environ["GPERFTOOLS"]
+        idir = os.path.join(gpd, "include")
+        ldir = os.path.join(gpd, "lib")
+        print "INCLUDE AND LIB DIRS", idir, ldir
+        config.add_extension("perftools_wrap",
+                ["yt/utilities/_amr_utils/perftools_wrap.pyx"],
+                libraries=["profiler"],
+                library_dirs = [ldir],
+                include_dirs = [idir],
+            )
     config.make_config_py() # installs __config__.py
     return config

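The perftools_wrap extension above is only built when the GPERFTOOLS environment variable points at a gperftools install prefix. A hedged sketch of what that gate expects, with an example prefix path:

    import os
    # setup.py looks for <prefix>/include and <prefix>/lib under this variable;
    # leave it unset (or set to "no") to skip building perftools_wrap.
    os.environ["GPERFTOOLS"] = "/opt/gperftools"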

diff -r bfcf3519e081e024b29b5b1806e6dbc6fcbd7805 -r 43fc7bb777fbdcbb681809ffe06a3c176534ea7f yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -28,7 +28,8 @@
 import numpy as na
 from yt.funcs import *
 from yt.visualization.volume_rendering.grid_partitioner import HomogenizedVolume
-from yt.utilities.amr_utils import PartitionedGrid, kdtree_get_choices
+from yt.utilities.amr_utils import kdtree_get_choices
+from yt.utilities._amr_utils.grid_traversal import PartitionedGrid
 from yt.utilities.performance_counters import yt_counters, time_function
 from yt.utilities.parallel_tools.parallel_analysis_interface \
     import ParallelAnalysisInterface 
@@ -678,7 +679,7 @@
                 if na.any(current_node.r_corner-current_node.l_corner == 0):
                     current_node.brick = None
                 else:
-                    current_node.brick = PartitionedGrid(current_node.grid.id, len(self.fields), data,
+                    current_node.brick = PartitionedGrid(current_node.grid.id, data,
                                                          current_node.l_corner.copy(), 
                                                          current_node.r_corner.copy(), 
                                                          current_node.dims.astype('int64'))
@@ -708,7 +709,7 @@
                   current_node.li[1]:current_node.ri[1]+1,
                   current_node.li[2]:current_node.ri[2]+1].copy() for d in dds]
 
-        current_node.brick = PartitionedGrid(current_node.grid.id, len(self.fields), data,
+        current_node.brick = PartitionedGrid(current_node.grid.id, data,
                                              current_node.l_corner.copy(), 
                                              current_node.r_corner.copy(), 
                                              current_node.dims.astype('int64'))
@@ -1251,7 +1252,7 @@
                 if node.grid is not None:
                     data = [f["brick_%s_%s" %
                               (hex(i), field)][:].astype('float64') for field in self.fields]
-                    node.brick = PartitionedGrid(node.grid.id, len(self.fields), data,
+                    node.brick = PartitionedGrid(node.grid.id, data,
                                                  node.l_corner.copy(), 
                                                  node.r_corner.copy(), 
                                                  node.dims.astype('int64'))

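A minimal sketch of the new PartitionedGrid call convention used above: the field count is now inferred from the list of data arrays rather than passed explicitly. Array shapes and values below are illustrative only:

    import numpy as na
    from yt.utilities._amr_utils.grid_traversal import PartitionedGrid

    data = [na.random.random((9, 9, 9)) for _ in range(2)]  # one array per field
    brick = PartitionedGrid(0, data,
                            na.array([0.0, 0.0, 0.0]),      # left corner
                            na.array([1.0, 1.0, 1.0]),      # right corner
                            na.array([8, 8, 8], dtype='int64'))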

diff -r bfcf3519e081e024b29b5b1806e6dbc6fcbd7805 -r 43fc7bb777fbdcbb681809ffe06a3c176534ea7f yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -325,10 +325,11 @@
         req.add_header('Authorization', 'Basic %s' % base64.b64encode(upw).strip())
     return urllib2.urlopen(req).read()
 
-def _get_yt_supp():
+def _get_yt_supp(uu):
     supp_path = os.path.join(os.environ["YT_DEST"], "src",
                              "yt-supplemental")
     # Now we check that the supplemental repository is checked out.
+    from mercurial import hg, ui, commands
     if not os.path.isdir(supp_path):
         print
         print "*** The yt-supplemental repository is not checked ***"
@@ -466,14 +467,29 @@
             print "Could not load file."
             sys.exit()
         import yt.mods
-        from IPython.Shell import IPShellEmbed
+
+        import IPython
+        if IPython.__version__.startswith("0.10"):
+            api_version = '0.10'
+        elif IPython.__version__.startswith("0.11"):
+            api_version = '0.11'
+
         local_ns = yt.mods.__dict__.copy()
         local_ns['pf'] = pf
-        shell = IPShellEmbed()
-        shell(local_ns = local_ns,
-              header =
-            "\nHi there!  Welcome to yt.\n\nWe've loaded your parameter file as 'pf'.  Enjoy!"
-             )
+
+        if api_version == '0.10':
+            shell = IPython.Shell.IPShellEmbed()
+            shell(local_ns = local_ns,
+                  header =
+                  "\nHi there!  Welcome to yt.\n\nWe've loaded your parameter file as 'pf'.  Enjoy!"
+                  )
+        else:
+            from IPython.config.loader import Config
+            cfg = Config()
+            cfg.InteractiveShellEmbed.local_ns = local_ns
+            IPython.embed(config=cfg)
+            from IPython.frontend.terminal.embed import InteractiveShellEmbed
+            ipshell = InteractiveShellEmbed(config=cfg)
 
     @add_cmd_options(['outputfn','bn','thresh','dm_only','skip'])
     @check_args
@@ -889,7 +905,7 @@
             print "*** to point to the installation location!        ***"
             print
             sys.exit(1)
-        supp_path = _get_yt_supp()
+        supp_path = _get_yt_supp(uu)
         print
         print "I have found the yt-supplemental repository at %s" % (supp_path)
         print
@@ -1305,7 +1321,8 @@
         import imp
         from mercurial import hg, ui, commands, error, config
         uri = "http://hub.yt-project.org/3rdparty/API/api.php"
-        supp_path = _get_yt_supp()
+        uu = ui.ui()
+        supp_path = _get_yt_supp(uu)
         try:
             result = imp.find_module("cedit", [supp_path])
         except ImportError:
@@ -1322,7 +1339,6 @@
             print "Sorry, but I'm going to bail."
             sys.exit(1)
         hgbb = imp.load_module("hgbb", *result)
-        uu = ui.ui()
         try:
             repo = hg.repository(uu, opts.repo)
             conf = config.config()

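A condensed sketch of the IPython version shim above, using only the entry points the diff itself touches; the namespace contents are illustrative:

    import IPython

    local_ns = {"pf": None}   # placeholder; the command passes yt.mods plus the loaded pf
    if IPython.__version__.startswith("0.10"):
        shell = IPython.Shell.IPShellEmbed()
        shell(local_ns=local_ns, header="Welcome to yt.")
    else:
        from IPython.config.loader import Config
        cfg = Config()
        cfg.InteractiveShellEmbed.local_ns = local_ns
        IPython.embed(config=cfg)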

diff -r bfcf3519e081e024b29b5b1806e6dbc6fcbd7805 -r 43fc7bb777fbdcbb681809ffe06a3c176534ea7f yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -344,9 +344,13 @@
     result_id = None
 
 def parallel_objects(objects, njobs, storage = None):
-    if not parallel_capable: raise RuntimeError
+    if not parallel_capable:
+        njobs = 1
+        mylog.warn("parallel_objects() is being used when parallel_capable is false. The loop is not being run in parallel. This may not be what was expected.")
     my_communicator = communication_system.communicators[-1]
     my_size = my_communicator.size
+    if njobs <= 0:
+        njobs = my_size
     if njobs > my_size:
         mylog.error("You have asked for %s jobs, but you only have %s processors.",
             njobs, my_size)
@@ -357,7 +361,8 @@
         if my_rank in comm_set:
             my_new_id = i
             break
-    communication_system.push_with_ids(all_new_comms[my_new_id].tolist())
+    if parallel_capable:
+        communication_system.push_with_ids(all_new_comms[my_new_id].tolist())
     obj_ids = na.arange(len(objects))
 
     to_share = {}

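A minimal usage sketch of the relaxed behavior above, assuming parallel_objects yields each object assigned to the local processor group: a non-MPI run now warns and falls back to a serial loop instead of raising RuntimeError, and njobs <= 0 requests one job per processor. The dataset names are placeholders:

    from yt.mods import load
    from yt.utilities.parallel_tools.parallel_analysis_interface import \
        parallel_objects

    pfs = [load("DD%04i/DD%04i" % (i, i)) for i in range(4)]  # placeholder outputs
    for pf in parallel_objects(pfs, njobs=-1):  # njobs <= 0: one job per processor
        pf.h.print_stats()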

diff -r bfcf3519e081e024b29b5b1806e6dbc6fcbd7805 -r 43fc7bb777fbdcbb681809ffe06a3c176534ea7f yt/utilities/parameter_file_storage.py
--- a/yt/utilities/parameter_file_storage.py
+++ b/yt/utilities/parameter_file_storage.py
@@ -62,6 +62,7 @@
     _distributed = True
     _processing = False
     _owner = 0
+    _register = True
 
     def __new__(cls, *p, **k):
         self = object.__new__(cls, *p, **k)
@@ -74,6 +75,7 @@
         Otherwise, use read-only settings.
 
         """
+        if self._register == False: return
         if ytcfg.getboolean("yt", "StoreParameterFiles"):
             self._read_only = False
             self.init_db()
@@ -81,6 +83,7 @@
         else:
             self._read_only = True
             self._records = {}
+        self._register = False
 
     @parallel_simple_proxy
     def init_db(self):


diff -r bfcf3519e081e024b29b5b1806e6dbc6fcbd7805 -r 43fc7bb777fbdcbb681809ffe06a3c176534ea7f yt/visualization/color_maps.py
--- a/yt/visualization/color_maps.py
+++ b/yt/visualization/color_maps.py
@@ -34,15 +34,15 @@
     except ValueError:
         return False
 
-raven_colormaps = {}
+yt_colormaps = {}
 
 def add_cmap(name, cdict):
-    raven_colormaps[name] = \
+    yt_colormaps[name] = \
         cc.LinearSegmentedColormap(name,cdict,256)
     mcm.datad[name] = cdict
     mcm.__dict__[name] = cdict
     try: # API compatibility
-        mcm.register_cmap(name, raven_colormaps[name])
+        mcm.register_cmap(name, yt_colormaps[name])
     except AttributeError:
         pass
     

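The rename above (raven_colormaps to yt_colormaps) leaves add_cmap() working as before: it registers the colormap in yt's own registry and, where possible, with matplotlib. A small example using a toy grayscale cdict that is not shipped with yt:

    from yt.visualization.color_maps import add_cmap, yt_colormaps

    cdict = {'red':   ((0.0, 0.0, 0.0), (1.0, 1.0, 1.0)),
             'green': ((0.0, 0.0, 0.0), (1.0, 1.0, 1.0)),
             'blue':  ((0.0, 0.0, 0.0), (1.0, 1.0, 1.0))}
    add_cmap("example_gray", cdict)
    assert "example_gray" in yt_colormaps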

diff -r bfcf3519e081e024b29b5b1806e6dbc6fcbd7805 -r 43fc7bb777fbdcbb681809ffe06a3c176534ea7f yt/visualization/image_panner/vm_panner.py
--- a/yt/visualization/image_panner/vm_panner.py
+++ b/yt/visualization/image_panner/vm_panner.py
@@ -262,176 +262,6 @@
     def set_limits(self, xlim, ylim):
         for w in self.windows: w.set_limits(xlim, ylim)
 
-class RemoteWindowedVariableMeshController(MultipleWindowVariableMeshPanner):
-    def __init__(self, source, mec = None):
-        """
-        This panner controls remote windowed panners.  It requires a *source*,
-        which will be pickled and sent to the remote panners, which it will
-        create as requested.  If not supplied with a *mec* (an IPython
-        MultiEngineClient) it will create one itself.
-        """
-        if mec is None:
-            from IPython.kernel.client import get_multiengine_client
-            mec = get_multiengine_client()
-        self.mec = mec
-        self.mec.execute("import yt.extensions.image_panner")
-        self._var_name = "_image_panner_%s" % (id(self))
-        self._pf_name = "_pf_%s" % (id(self))
-        self._source_name = "_source_%s" % (id(self))
-        self.source = source
-        self.mec.execute("from yt.mods import *")
-        self.mec.execute("from yt.funcs import iterable")
-        self.mec.push({self._pf_name: self.pf})
-        self.mec.execute("%s.h" % self._pf_name)
-        self.mec.push({self._source_name: self.source})
-        # Now, because the double pickling tosses a PF hash reference inside
-        # the unpickled object, we work around it a little
-        self.mec.execute("while iterable(%s): %s = %s[1]" % (
-            self._source_name, self._source_name, self._source_name))
-        self.windows = []
-
-    def add_window(self, *args, **kwargs):
-        """
-        This will create a new remote WindowedVariableMeshImagePanner.  The
-        *args* and *kwargs* supplied here will be passed over, but the *source*
-        argument is implicitly handled by this routine.
-        """
-        engine_id = len(self.windows)
-        an = "_args_%s" % id(self)
-        kn = "_kwargs_%s" % id(self)
-        if 'callback' not in kwargs:
-            kwargs['callback'] = ImageSaver(engine_id)
-        self.mec.push({an: args, kn: kwargs}, engine_id)
-        exec_string = "%s = %s.h.windowed_image_panner(%s, *%s, **%s)" % (
-            self._var_name, self._pf_name, self._source_name, an, kn)
-        self.mec.execute(exec_string, engine_id)
-        self.windows.append(WindowedVariableMeshPannerProxy(
-            self.mec, engine_id, self._var_name, id(self)))
-
-data_object_registry["remote_image_panner"] = RemoteWindowedVariableMeshController
-
-_wrapped_methods = ["zoom", "pan", "pan_x", "pan_y", "pan_rel",
-                     "pan_rel_x", "pan_rel_y", "set_limits"]
-
-class WindowedVariableMeshPannerProxy(object):
-    class __metaclass__(type):
-        def __new__(cls, name, b, d):
-            # We add on a bunch of proxy functions
-            def return_proxy(fname):
-                def func(self, *args, **kwargs):
-                    vn = "_ret_%s" % self._cid
-                    an = "_args_%s" % self._cid
-                    kn = "_kwargs_%s" % self._cid
-                    self.mec.push({an: args, kn: kwargs}, self.engine_id)
-                    exec_string = "%s = %s.%s(*%s, **%s)" % (
-                        vn, self._var_name, fname, an, kn)
-                    print "Executing %s on %s" % (exec_string, self.engine_id)
-                    self.mec.execute(exec_string, self.engine_id)
-                    return self.mec.pull(vn, self.engine_id)
-                return func
-            new_dict = {}
-            new_dict.update(d)
-            for f in _wrapped_methods:
-                new_dict[f] = return_proxy(f)
-            return type.__new__(cls, name, b, new_dict)
-
-    def __init__(self, mec, engine_id, var_name, cid):
-        # mec here is, optionally, an instance of MultiEngineClient
-        self._var_name = var_name
-        self._cid = cid
-        self.engine_id = engine_id
-        self.mec = mec
-
-    @property
-    def bounds(self):
-        vn = "_ret_%s" % self._cid
-        self.mec.execute("%s = %s.bounds" % (vn, self._var_name),
-                         self.engine_id)
-        return self.mec.pull(vn, self.engine_id)
-
-    @property
-    def width(self):
-        vn = "_ret_%s" % self._cid
-        self.mec.execute("%s = %s.width" % (vn, self._var_name),
-                         self.engine_id)
-        return self.mec.pull(vn, self.engine_id)
-
-    @property
-    def buffer(self):
-        vn = "_ret_%s" % self._cid
-        self.mec.execute("%s = %s.buffer" % (vn, self._var_name),
-                         self.engine_id)
-        return self.mec.pull(vn, self.engine_id)
-
-    def _regenerate_buffer(self):
-        return
-
-    def _run_callback(self):
-        self.mec.execute("%s._regenerate_buffer()" % self._var_name,
-                         self.engine_id)
-        self.mec.execute("%s.callback(%s.buffer)" % (
-            self._var_name, self._var_name), self.engine_id)
-
-class ProxySource(object):
-    # This proxies only the things we know we need
-    # Note that we assume we will only have a single engine.
-    def __init__(self, mec, idnum, source_varname):
-        self.mec = mec
-        self.idnum = idnum
-        self.source_varname = source_varname
-        self.mec.execute("_tmp_%s = %s.axis" % (
-            self.idnum, self.source_varname))
-        self.axis = self.mec.pull("_tmp_%s" % self.idnum)[0]
-
-    def keys(self):
-        self.mec.execute("_tmp_%s = %s.keys()" % (
-            self.idnum, self.source_varname))
-        keys = self.mec.pull("_tmp_%s" % self.idnum)[0]
-        dd = dict( (k, None) for k in keys )
-        return dd
-
-    @property
-    def pf(self):
-        self.mec.execute("_tmp_%s = %s.pf.domain_left_edge" % (
-            self.idnum, self.source_varname))
-        DLE = self.mec.pull("_tmp_%s" % self.idnum)[0]
-        self.mec.execute("_tmp_%s = %s.pf.domain_right_edge" % (
-            self.idnum, self.source_varname))
-        DRE = self.mec.pull("_tmp_%s" % self.idnum)[0]
-        return dict(DomainLeftEdge = DLE, DomainRightEdge = DRE)
-
-class ProxyFixedResolutionBuffer(dict):
-    pass
-
-class NonLocalDataImagePanner(VariableMeshPanner):
-    def __init__(self, mec, source_varname, size, field,
-                 callback = None, viewport_callback = None):
-        self.source_varname = source_varname
-        self._var_name = "_image_panner_%s" % (id(self))
-        self.mec = mec
-        self.mec.execute("import yt.extensions.image_panner")
-        self.mec.execute("%s = yt.extensions.image_panner.VariableMeshPanner(" % (
-                        self._var_name) +
-                          "%s, (%s, %s), '%s')" % (
-                        source_varname, size[0], size[1], field))
-
-        ps = ProxySource(mec, id(self), source_varname)
-        self._prfb = ProxyFixedResolutionBuffer()
-
-        VariableMeshPanner.__init__(self, ps, size, field,
-                        callback, viewport_callback)
-
-    def _regenerate_buffer(self):
-        args = (self.xlim, self.ylim)
-        self.mec.push({'_tmp_%s' % id(self) : args}, block=False)
-        self.mec.execute("%s.set_limits(*_tmp_%s)" % (self._var_name, id(self)),
-                         block=False)
-        self.mec.execute("_tmp_%s = %s.buffer" % (id(self), self._var_name),
-                         block=False)
-        self._prfb[self.field] = self.mec.pull("_tmp_%s" % (id(self)))[0]
-        self._prfb.bounds = self.xlim + self.ylim
-        self._buffer = self._prfb
-
 class ImageSaver(object):
     def __init__(self, tile_id):
         """
@@ -479,43 +309,3 @@
         tf.close()
         self.transport.append(response_body)
 
-class PanningCeleritasStreamer(object):
-    _initialized = False
-    def __init__(self, tile_id, cmap = "algae", port = 9988,
-                 zlim = (0.0, 1.0), take_log = True):
-        """
-        This is an in-development mechanism for supplying buffers to a
-        Celeritas server.
-        """
-        self.tile_id = tile_id
-        self._port = port
-        self.cmap = cmap
-        self.zlim = zlim
-        self.take_log = True
-
-    def initialize(self, shape):
-        if isinstance(self.cmap, types.StringTypes):
-            import matplotlib.cm
-            self.cmap = matplotlib.cm.get_cmap(self.cmap)
-
-        import celeritas_streamer
-        self.cs = celeritas_streamer.CeleritasStream()
-        #print "Setting shape: %s and port: %s in %s" % (
-        #    shape, self._port, os.getpid())
-        self.cs.setSize(*shape)
-        self.cs.setLocalPort(self._port)
-        self.cs.initialize()
-        self._initialized = True
-
-    def __call__(self, val):
-        if not self._initialized: self.initialize(val.shape)
-        if self.take_log:
-            vv = na.log10(val)
-        else:
-            vv = val.copy()
-        na.subtract(vv, self.zlim[0], vv)
-        na.divide(vv, (self.zlim[1]-self.zlim[0]), vv)
-        new_buf = self.cmap(vv)[:,:,:3]
-        na.multiply(new_buf, 255.0, new_buf)
-        new_buf = new_buf.astype('uint8')
-        self.cs.readFromRGBMemAndSend(new_buf)


diff -r bfcf3519e081e024b29b5b1806e6dbc6fcbd7805 -r 43fc7bb777fbdcbb681809ffe06a3c176534ea7f yt/visualization/plot_collection.py
--- a/yt/visualization/plot_collection.py
+++ b/yt/visualization/plot_collection.py
@@ -400,7 +400,7 @@
         if coord == None:
             coord = center[axis]
         if obj is None:
-            if field_parameters == None: field_parameters = {}
+            if field_parameters is None: field_parameters = {}
             obj = self.pf.hierarchy.slice(axis, coord, field,
                             center=center, **field_parameters)
         p = self._add_plot(SlicePlot(
@@ -1065,7 +1065,7 @@
             the y-axis.  All subsequent fields will be binned and their
             profiles added to the underlying `BinnedProfile2D`.
         cmap : string, optional
-            An acceptable colormap.  See either raven.color_maps or
+            An acceptable colormap.  See either yt.visualization.color_maps or
             http://www.scipy.org/Cookbook/Matplotlib/Show_colormaps .
         weight : string, default "CellMassMsun"
             The weighting field for an average.  This defaults to mass-weighted
@@ -1189,7 +1189,7 @@
             The center to be used for things like radius and radial velocity.
             Defaults to the center of the plot collection.
         cmap : string, optional
-            An acceptable colormap.  See either raven.color_maps or
+            An acceptable colormap.  See either yt.visualization.color_maps or
             http://www.scipy.org/Cookbook/Matplotlib/Show_colormaps .
         weight : string, default "CellMassMsun"
             The weighting field for an average.  This defaults to mass-weighted


diff -r bfcf3519e081e024b29b5b1806e6dbc6fcbd7805 -r 43fc7bb777fbdcbb681809ffe06a3c176534ea7f yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -164,8 +164,8 @@
                              plot.data[self.field_y] - self.bv_y,
                              int(nx), int(ny),
                            (x0, x1, y0, y1),).transpose()
-        X = na.mgrid[0:plot.image._A.shape[0]-1:nx*1j]# + 0.5*factor
-        Y = na.mgrid[0:plot.image._A.shape[1]-1:ny*1j]# + 0.5*factor
+        X = na.mgrid[0:plot.image._A.shape[0]-1:ny*1j]# + 0.5*factor
+        Y = na.mgrid[0:plot.image._A.shape[1]-1:nx*1j]# + 0.5*factor
         if self.normalize:
             nn = na.sqrt(pixX**2 + pixY**2)
             pixX /= nn
@@ -723,7 +723,7 @@
                  font_size=8, print_halo_size=False,
                  print_halo_mass=False, width=None):
         """
-        Accepts a :class:`yt.lagos.HopList` *hop_output* and plots up to
+        Accepts a :class:`yt.HopList` *hop_output* and plots up to
         *max_number* (None for unlimited) halos as circles.
         """
         self.hop_output = hop_output


diff -r bfcf3519e081e024b29b5b1806e6dbc6fcbd7805 -r 43fc7bb777fbdcbb681809ffe06a3c176534ea7f yt/visualization/plot_types.py
--- a/yt/visualization/plot_types.py
+++ b/yt/visualization/plot_types.py
@@ -35,7 +35,7 @@
     x_dict, \
     y_dict, \
     axis_names
-from .color_maps import raven_colormaps
+from .color_maps import yt_colormaps
 
 class CallbackRegistryHandler(object):
     def __init__(self, plot):
@@ -226,8 +226,8 @@
         Change the colormap of this plot to *cmap*.
         """
         if isinstance(cmap, types.StringTypes):
-            if str(cmap) in raven_colormaps:
-                cmap = raven_colormaps[str(cmap)]
+            if str(cmap) in yt_colormaps:
+                cmap = yt_colormaps[str(cmap)]
             elif hasattr(matplotlib.cm, cmap):
                 cmap = getattr(matplotlib.cm, cmap)
         self.cmap = cmap


diff -r bfcf3519e081e024b29b5b1806e6dbc6fcbd7805 -r 43fc7bb777fbdcbb681809ffe06a3c176534ea7f yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -198,7 +198,7 @@
             the y-axis.  All subsequent fields will be binned and their
             profiles added to the underlying `BinnedProfile2D`.
         cmap : string, optional
-            An acceptable colormap.  See either raven.color_maps or
+            An acceptable colormap.  See either yt.visualization.color_maps or
             http://www.scipy.org/Cookbook/Matplotlib/Show_colormaps .
         weight : string, default "CellMassMsun"
             The weighting field for an average.  This defaults to mass-weighted


diff -r bfcf3519e081e024b29b5b1806e6dbc6fcbd7805 -r 43fc7bb777fbdcbb681809ffe06a3c176534ea7f yt/visualization/streamlines.py
--- a/yt/visualization/streamlines.py
+++ b/yt/visualization/streamlines.py
@@ -38,7 +38,7 @@
 
     Parameters
     ----------
-    pf : `~yt.lagos.StaticOutput`
+    pf : `~yt.data_objects.StaticOutput`
         This is the parameter file to streamline
     pos : array_like
         An array of initial starting positions of the streamlines.


diff -r bfcf3519e081e024b29b5b1806e6dbc6fcbd7805 -r 43fc7bb777fbdcbb681809ffe06a3c176534ea7f yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -40,6 +40,9 @@
 from yt.utilities.amr_kdtree.api import AMRKDTree
 from numpy import pi
 
+from yt.utilities._amr_utils.grid_traversal import \
+    PartitionedGrid, ProjectionSampler, VolumeRenderSampler
+
 class Camera(ParallelAnalysisInterface):
     def __init__(self, center, normal_vector, width,
                  resolution, transfer_function,
@@ -193,6 +196,9 @@
         self.steady_north = steady_north
         self.expand_factor = expand_factor
         # This seems to be necessary for now.  Not sure what goes wrong when not true.
+        if na.all(north_vector == normal_vector):
+            mylog.error("North vector and normal vector are the same.  Disregarding north vector.")
+            north_vector = None
         if north_vector is not None: self.steady_north=True
         self.north_vector = north_vector
         self.rotation_vector = north_vector
@@ -315,11 +321,10 @@
         positions[:,:,1] = inv_mat[1,0]*px+inv_mat[1,1]*py+self.back_center[1]
         positions[:,:,2] = inv_mat[2,0]*px+inv_mat[2,1]*py+self.back_center[2]
         bounds = (px.min(), px.max(), py.min(), py.max())
-        vector_plane = VectorPlane(positions, self.box_vectors[2],
-                                      self.back_center, bounds, image,
-                                      self.unit_vectors[0],
-                                      self.unit_vectors[1])
-        return vector_plane
+        return (positions, self.box_vectors[2],
+                self.back_center, bounds, image,
+                self.unit_vectors[0],
+                self.unit_vectors[1])
 
     def snapshot(self, fn = None, clip_ratio = None):
         r"""Ray-cast the camera.
@@ -343,19 +348,25 @@
         """
         image = na.zeros((self.resolution[0], self.resolution[1], 3),
                          dtype='float64', order='C')
-        vector_plane = self.get_vector_plane(image)
-        tfp = TransferFunctionProxy(self.transfer_function) # Reset it every time
-        tfp.ns = self.sub_samples
+        rotp = na.concatenate([self.inv_mat.ravel('F'), self.back_center.ravel()])
+        args = (rotp, self.box_vectors[2], self.back_center,
+                (-self.width[0]/2.0, self.width[0]/2.0,
+                 -self.width[1]/2.0, self.width[1]/2.0),
+                image, self.unit_vectors[0], self.unit_vectors[1],
+                na.array(self.width),
+                self.transfer_function, self.sub_samples)
+        sampler = VolumeRenderSampler(*args)
         self.volume.initialize_source()
 
         pbar = get_pbar("Ray casting",
                         (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
         total_cells = 0
         for brick in self.volume.traverse(self.back_center, self.front_center, image):
-            brick.cast_plane(tfp, vector_plane)
+            sampler(brick)
             total_cells += na.prod(brick.my_data[0].shape)
             pbar.update(total_cells)
         pbar.finish()
+        image = sampler.aimage
 
         if self.comm.rank is 0 and fn is not None:
             if clip_ratio is not None:
@@ -446,6 +457,8 @@
                 if not iterable(final_width):
                     width = na.array([final_width, final_width, final_width]) 
                     # front/back, left/right, top/bottom
+                if (self.center == 0.0).all():
+                    self.center += (na.array(final) - self.center) / (10. * n_steps)
                 final_zoom = final_width/na.array(self.width)
                 dW = final_zoom**(1.0/n_steps)
 	    else:
@@ -789,7 +802,7 @@
         return (left_camera, right_camera)
 
 def off_axis_projection(pf, center, normal_vector, width, resolution,
-                        field, weight = None, volume = None):
+                        field, weight = None, volume = None, no_ghost = True):
     r"""Project through a parameter file, off-axis, and return the image plane.
 
     This function will accept the necessary items to integrate through a volume
@@ -821,6 +834,14 @@
     volume : `yt.extensions.volume_rendering.HomogenizedVolume`, optional
         The volume to ray cast through.  Can be specified for finer-grained
         control, but otherwise will be automatically generated.
+    no_ghost: bool, optional
+        Optimization option.  If True, homogenized bricks will
+        extrapolate out from grid instead of interpolating from
+        ghost zones that have to first be calculated.  This can
+        lead to large speed improvements, but at a loss of
+        accuracy/smoothness in resulting image.  The effects are
+        less notable when the transfer function is smooth and
+        broad. Default: True
 
     Returns
     -------
@@ -848,7 +869,7 @@
     cam = pf.h.camera(center, normal_vector, width, resolution, tf,
                       fields = fields,
                       log_fields = [False] * len(fields),
-                      volume = volume)
+                      volume = volume, no_ghost = no_ghost)
     vals = cam.snapshot()
     image = vals[:,:,0]
     if weight is None:


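An illustrative call of the updated off_axis_projection() signature above, including the new no_ghost flag; the dataset path, center, line of sight, and field are examples only:

    from yt.mods import load
    from yt.visualization.volume_rendering.camera import off_axis_projection

    pf = load("DD0010/DD0010")      # placeholder dataset
    c = [0.5, 0.5, 0.5]             # projection center
    L = [1.0, 1.0, 0.0]             # off-axis line of sight
    image = off_axis_projection(pf, c, L, 1.0, 512, "Density",
                                weight=None, no_ghost=True)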

https://bitbucket.org/yt_analysis/yt/changeset/53a8a898b645/
changeset:   53a8a898b645
branch:      geometry_handling
user:        MatthewTurk
date:        2011-12-16 16:01:36
summary:     Adding cutting plane grid selection (lots faster, but it was not an expensive
operation)
affected #:  2 files

diff -r 43fc7bb777fbdcbb681809ffe06a3c176534ea7f -r 53a8a898b645edcb6e0107ed3bf9e375e96dfb62 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -41,7 +41,8 @@
 from yt.utilities.amr_utils import find_grids_in_inclined_box, \
     grid_points_in_volume, planar_points_in_volume, VoxelTraversal, \
     QuadTree, get_box_grids_below_level, ghost_zone_interpolate, \
-    march_cubes_grid, march_cubes_grid_flux, ortho_ray_grids, ray_grids
+    march_cubes_grid, march_cubes_grid_flux, ortho_ray_grids, ray_grids, \
+    slice_grids, cutting_plane_grids
 from yt.utilities.data_point_utilities import CombineGrids, \
     DataCubeRefine, DataCubeReplace, FillRegion, FillBuffer
 from yt.utilities.definitions import axis_names, x_dict, y_dict
@@ -995,9 +996,10 @@
         self.ActiveDimensions = (t.shape[1], 1, 1)
 
     def _get_list_of_grids(self):
-        goodI = ((self.pf.h.grid_right_edge[:,self.axis] > self.coord)
-              &  (self.pf.h.grid_left_edge[:,self.axis] <= self.coord ))
-        self._grids = self.pf.h.grids[goodI] # Using sources not hierarchy
+        gi = slice_grids(self, 
+                self.hierarchy.grid_left_edge,
+                self.hierarchy.grid_right_edge)
+        self._grids = self.hierarchy.grids[gi]
 
     def __cut_mask_child_mask(self, grid):
         mask = grid.child_mask.copy()
@@ -1150,25 +1152,9 @@
         return self._norm_vec
 
     def _get_list_of_grids(self):
-        # Recall that the projection of the distance vector from a point
-        # onto the normal vector of a plane is:
-        # D = (a x_0 + b y_0 + c z_0 + d)/sqrt(a^2+b^2+c^2)
-        # @todo: Convert to using corners
-        LE = self.pf.h.grid_left_edge
-        RE = self.pf.h.grid_right_edge
-        vertices = na.array([[LE[:,0],LE[:,1],LE[:,2]],
-                             [RE[:,0],RE[:,1],RE[:,2]],
-                             [LE[:,0],LE[:,1],RE[:,2]],
-                             [RE[:,0],RE[:,1],LE[:,2]],
-                             [LE[:,0],RE[:,1],RE[:,2]],
-                             [RE[:,0],LE[:,1],LE[:,2]],
-                             [LE[:,0],RE[:,1],LE[:,2]],
-                             [RE[:,0],LE[:,1],RE[:,2]]])
-        # This gives us shape: 8, 3, n_grid
-        D = na.sum(self._norm_vec.reshape((1,3,1)) * vertices, axis=1) + self._d
-        self.D = D
-        self._grids = self.hierarchy.grids[
-            na.where(na.logical_not(na.all(D<0,axis=0) | na.all(D>0,axis=0) )) ]
+        gridi = cutting_plane_grids(self, self.pf.h.grid_left_edge,
+                                          self.pf.h.grid_right_edge)
+        self._grids = self.hierarchy.grids[gridi]
 
     @cache_mask
     def _get_cut_mask(self, grid):


diff -r 43fc7bb777fbdcbb681809ffe06a3c176534ea7f -r 53a8a898b645edcb6e0107ed3bf9e375e96dfb62 yt/utilities/_amr_utils/geometry_utils.pyx
--- a/yt/utilities/_amr_utils/geometry_utils.pyx
+++ b/yt/utilities/_amr_utils/geometry_utils.pyx
@@ -110,6 +110,53 @@
             continue
     return gridi
 
+def slice_grids(dobj, np.ndarray[np.float64_t, ndim=2] left_edges,
+                      np.ndarray[np.float64_t, ndim=2] right_edges):
+    cdef int i, ax
+    cdef int ng = left_edges.shape[0]
+    cdef np.ndarray[np.int32_t, ndim=1] gridi = np.zeros(ng, dtype='int32')
+    ax = dobj.axis
+    cdef np.float64_t coord = dobj.coord
+    for i in range(ng):
+        if left_edges[i, ax] <= coord and \
+           right_edges[i, ax] > coord:
+            gridi[i] = 1
+    return gridi
+
+def cutting_plane_grids(dobj, np.ndarray[np.float64_t, ndim=2] left_edges,
+                        np.ndarray[np.float64_t, ndim=2] right_edges):
+    cdef int i
+    cdef int ng = left_edges.shape[0]
+    cdef np.ndarray[np.int32_t, ndim=1] gridi = np.zeros(ng, dtype='int32')
+    cdef np.float64_t *arr[2]
+    arr[0] = <np.float64_t *> left_edges.data
+    arr[1] = <np.float64_t *> right_edges.data
+    cdef np.float64_t x, y, z
+    cdef np.float64_t norm_vec[3]
+    cdef np.float64_t d = dobj._d # offset to center
+    cdef np.float64_t gd # offset to center
+    cdef np.int64_t all_under, all_over
+    for i in range(3):
+        norm_vec[i] = dobj._norm_vec[i]
+    for i in range(ng):
+        all_under = 1
+        all_over = 1
+        # Check each corner
+        for xi in range(2):
+            x = arr[xi][i * 3 + 0]
+            for yi in range(2):
+                y = arr[yi][i * 3 + 1]
+                for zi in range(2):
+                    z = arr[zi][i * 3 + 2]
+                    gd = ( x*norm_vec[0]
+                         + y*norm_vec[1]
+                         + z*norm_vec[2]) + d
+                    if gd <= 0: all_over = 0
+                    if gd >= 0: all_under = 0
+        if not (all_over == 1 or all_under == 1):
+            gridi[i] = 1
+    return gridi
+
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)


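The corner test in cutting_plane_grids() above selects a grid exactly when its eight corners do not all fall strictly on one side of the plane n.x + d = 0 (slice_grids applies the analogous one-dimensional test along the slice axis). A NumPy sketch of the same selection, with illustrative array names:

    import numpy as np

    def cutting_plane_grids_py(norm_vec, d, left_edges, right_edges):
        ng = left_edges.shape[0]
        all_over = np.ones(ng, dtype=bool)   # every corner strictly above the plane
        all_under = np.ones(ng, dtype=bool)  # every corner strictly below the plane
        edges = (left_edges, right_edges)
        for xi in (0, 1):
            for yi in (0, 1):
                for zi in (0, 1):
                    gd = (edges[xi][:, 0] * norm_vec[0] +
                          edges[yi][:, 1] * norm_vec[1] +
                          edges[zi][:, 2] * norm_vec[2] + d)
                    all_over &= gd > 0
                    all_under &= gd < 0
        return ~(all_over | all_under)       # True where the grid straddles the plane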

https://bitbucket.org/yt_analysis/yt/changeset/5ab40a050aa4/
changeset:   5ab40a050aa4
branch:      geometry_handling
user:        MatthewTurk
date:        2011-12-16 16:59:30
summary:     Need this .astype or it just selects grid 1 tons of times.
affected #:  1 file

diff -r 53a8a898b645edcb6e0107ed3bf9e375e96dfb62 -r 5ab40a050aa4b615b57a183462d7113bc1b6a05a yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -42,7 +42,7 @@
     grid_points_in_volume, planar_points_in_volume, VoxelTraversal, \
     QuadTree, get_box_grids_below_level, ghost_zone_interpolate, \
     march_cubes_grid, march_cubes_grid_flux, ortho_ray_grids, ray_grids, \
-    slice_grids, cutting_plane_grids
+    slice_grids, cutting_plane_grids, cutting_plane_cells
 from yt.utilities.data_point_utilities import CombineGrids, \
     DataCubeRefine, DataCubeReplace, FillRegion, FillBuffer
 from yt.utilities.definitions import axis_names, x_dict, y_dict
@@ -1154,7 +1154,7 @@
     def _get_list_of_grids(self):
         gridi = cutting_plane_grids(self, self.pf.h.grid_left_edge,
                                           self.pf.h.grid_right_edge)
-        self._grids = self.hierarchy.grids[gridi]
+        self._grids = self.hierarchy.grids[gridi.astype("bool")]
 
     @cache_mask
     def _get_cut_mask(self, grid):



https://bitbucket.org/yt_analysis/yt/changeset/dc749a58f122/
changeset:   dc749a58f122
branch:      geometry_handling
user:        MatthewTurk
date:        2011-12-16 17:02:05
summary:     Adding first pass at cutting plane selection.  An idea is starting to take root
for how to extend this using generalized grid traversal.
affected #:  2 files

diff -r 5ab40a050aa4b615b57a183462d7113bc1b6a05a -r dc749a58f12299630ecd20284a5d59dbad923c63 yt/utilities/_amr_utils/geometry_utils.pyx
--- a/yt/utilities/_amr_utils/geometry_utils.pyx
+++ b/yt/utilities/_amr_utils/geometry_utils.pyx
@@ -26,7 +26,18 @@
 import numpy as np
 cimport numpy as np
 cimport cython
-from stdlib cimport malloc, free, abs
+from stdlib cimport malloc, free
+
+cdef extern from "math.h":
+    double exp(double x) nogil
+    float expf(float x) nogil
+    long double expl(long double x) nogil
+    double floor(double x) nogil
+    double ceil(double x) nogil
+    double fmod(double x, double y) nogil
+    double log2(double x) nogil
+    long int lrint(double x) nogil
+    double fabs(double x) nogil
 
 # These routines are separated into a couple different categories:
 #
@@ -160,6 +171,44 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
+cdef inline int cutting_plane_cell(
+                        np.float64_t x, np.float64_t y, np.float64_t z,
+                        np.float64_t norm_vec[3], np.float64_t d,
+                        np.float64_t dist):
+    cdef np.float64_t cd = x*norm_vec[0] + y*norm_vec[1] + z*norm_vec[2] + d
+    if fabs(cd) <= dist: return 1
+    return 0
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def cutting_plane_cells(dobj, gobj):
+    cdef np.ndarray[np.int32_t, ndim=3] mask 
+    cdef np.ndarray[np.float64_t, ndim=1] left_edge = gobj.LeftEdge
+    cdef np.ndarray[np.float64_t, ndim=1] dds = gobj.dds
+    cdef int i, j, k
+    cdef np.float64_t x, y, z, dist
+    cdef np.float64_t norm_vec[3]
+    cdef np.float64_t d = dobj._d
+
+    mask = np.zeros(gobj.ActiveDimensions, dtype='int32')
+    for i in range(3): norm_vec[i] = dobj._norm_vec[i]
+    dist = 0.5*(dds[0]*dds[0] + dds[1]*dds[1] + dds[2]*dds[2])**0.5
+    x = left_edge[0] + dds[0] * 0.5
+    for i in range(mask.shape[0]):
+        y = left_edge[1] + dds[1] * 0.5
+        for j in range(mask.shape[1]):
+            z = left_edge[2] + dds[2] * 0.5
+            for k in range(mask.shape[2]):
+                mask[i,j,k] = cutting_plane_cell(x, y, z, norm_vec, d, dist)
+                z += dds[2]
+            y += dds[1]
+        x += dds[0]
+    return mask
+                
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
 def get_box_grids_level(np.ndarray[np.float64_t, ndim=1] left_edge,
                         np.ndarray[np.float64_t, ndim=1] right_edge,
                         int level,

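cutting_plane_cells() above flags a cell when its center lies within half a cell diagonal of the plane n.x + d = 0, which is enough to catch every cell the plane actually passes through. A NumPy sketch of the same mask; the grid description is illustrative:

    import numpy as np

    def cutting_plane_cells_py(norm_vec, d, left_edge, dds, dims):
        dist = 0.5 * np.sqrt((dds ** 2).sum())       # half the cell diagonal
        x = left_edge[0] + dds[0] * (np.arange(dims[0]) + 0.5)
        y = left_edge[1] + dds[1] * (np.arange(dims[1]) + 0.5)
        z = left_edge[2] + dds[2] * (np.arange(dims[2]) + 0.5)
        cd = (norm_vec[0] * x[:, None, None] +
              norm_vec[1] * y[None, :, None] +
              norm_vec[2] * z[None, None, :] + d)
        return np.abs(cd) <= dist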

diff -r 5ab40a050aa4b615b57a183462d7113bc1b6a05a -r dc749a58f12299630ecd20284a5d59dbad923c63 yt/utilities/_amr_utils/setup.py
--- a/yt/utilities/_amr_utils/setup.py
+++ b/yt/utilities/_amr_utils/setup.py
@@ -126,6 +126,8 @@
                 depends=["yt/utilities/_amr_utils/freetype_includes.h"])
     config.add_extension("geometry_utils", 
                 ["yt/utilities/_amr_utils/geometry_utils.pyx"],
+               extra_compile_args=['-fopenmp'],
+               extra_link_args=['-fopenmp'],
                 libraries=["m"], depends=["yt/utilities/_amr_utils/fp_utils.pxd"])
     config.add_extension("Interpolators", 
                 ["yt/utilities/_amr_utils/Interpolators.pyx"],



https://bitbucket.org/yt_analysis/yt/changeset/060e702600d1/
changeset:   060e702600d1
branch:      geometry_handling
user:        MatthewTurk
date:        2011-12-21 19:25:18
summary:     First pass at star particle inclusion.  Compiles.
affected #:  2 files

diff -r dc749a58f12299630ecd20284a5d59dbad923c63 -r 060e702600d11d25f81f33b1aa14f2049f9a0111 yt/utilities/_amr_utils/grid_traversal.pyx
--- a/yt/utilities/_amr_utils/grid_traversal.pyx
+++ b/yt/utilities/_amr_utils/grid_traversal.pyx
@@ -84,6 +84,7 @@
     cdef public object my_data
     cdef public object LeftEdge
     cdef public object RightEdge
+    cdef int parent_grid_id
     cdef VolumeContainer *container
 
     @cython.boundscheck(False)
@@ -95,6 +96,7 @@
                   np.ndarray[np.int64_t, ndim=1] dims):
         # The data is likely brought in via a slice, so we copy it
         cdef np.ndarray[np.float64_t, ndim=3] tdata
+        self.parent_grid_id = parent_grid_id
         self.LeftEdge = left_edge
         self.RightEdge = right_edge
         self.container = <VolumeContainer *> \
@@ -324,6 +326,10 @@
     int n_samples
     FieldInterpolationTable *fits
     int field_table_ids[6]
+    np.float64_t star_coeff
+    np.float64_t star_er
+    np.float64_t star_sigma_num
+    kdtree_utils.kdtree *star_list
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
@@ -368,10 +374,102 @@
         for i in range(vc.n_fields):
             dvs[i] += slopes[i]
 
+cdef class star_kdtree_container:
+    cdef kdtree_utils.kdtree *tree
+    cdef public np.float64_t sigma
+    cdef public np.float64_t coeff
+
+    def __init__(self):
+        self.tree = kdtree_utils.kd_create(3)
+
+    def add_points(self,
+                   np.ndarray[np.float64_t, ndim=1] pos_x,
+                   np.ndarray[np.float64_t, ndim=1] pos_y,
+                   np.ndarray[np.float64_t, ndim=1] pos_z,
+                   np.ndarray[np.float64_t, ndim=2] star_colors):
+        cdef int i, n
+        cdef np.float64_t *pointer = <np.float64_t *> star_colors.data
+        for i in range(pos_x.shape[0]):
+            kdtree_utils.kd_insert3(self.tree,
+                pos_x[i], pos_y[i], pos_z[i], pointer + i*3)
+
+    def __dealloc__(self):
+        kdtree_utils.kd_free(self.tree)
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef void volume_render_stars_sampler(
+                 VolumeContainer *vc, 
+                 np.float64_t v_pos[3],
+                 np.float64_t v_dir[3],
+                 np.float64_t enter_t,
+                 np.float64_t exit_t,
+                 int index[3],
+                 void *data) nogil:
+    cdef ImageAccumulator *im = <ImageAccumulator *> data
+    cdef VolumeRenderAccumulator *vri = <VolumeRenderAccumulator *> \
+            im.supp_data
+    cdef kdtree_utils.kdres *ballq = NULL
+    # we assume this has vertex-centered data.
+    cdef int offset = index[0] * (vc.dims[1] + 1) * (vc.dims[2] + 1) \
+                    + index[1] * (vc.dims[2] + 1) + index[2]
+    cdef np.float64_t slopes[6], dp[3], ds[3]
+    cdef np.float64_t dt = (exit_t - enter_t) / vri.n_samples
+    cdef np.float64_t dvs[6], cell_left[3], local_dds[3], pos[3]
+    cdef int nstars
+    cdef np.float64_t *colors = NULL, gexp, gaussian, px, py, pz
+    for i in range(3):
+        dp[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
+        dp[i] -= index[i] * vc.dds[i] + vc.left_edge[i]
+        dp[i] *= vc.idds[i]
+        ds[i] = v_dir[i] * vc.idds[i] * dt
+    for i in range(vc.n_fields):
+        slopes[i] = offset_interpolate(vc.dims, dp,
+                        vc.data[i] + offset)
+    cdef np.float64_t temp
+    # Now we get the ball-tree result for the stars near our cell center.
+    for i in range(3):
+        cell_left[i] = index[i] * vc.dds[i] + vc.left_edge[i]
+        pos[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
+        local_dds[i] = v_dir[i] * dt
+    ballq = kdtree_utils.kd_nearest_range3(
+        vri.star_list, cell_left[0] + vc.dds[0]*0.5,
+                        cell_left[1] + vc.dds[1]*0.5,
+                        cell_left[2] + vc.dds[2]*0.5,
+                        vri.star_er + 0.9*vc.dds[0])
+                                    # ~0.866 + a bit
+
+    nstars = kdtree_utils.kd_res_size(ballq)
+    for i in range(vc.n_fields):
+        temp = slopes[i]
+        slopes[i] -= offset_interpolate(vc.dims, dp,
+                         vc.data[i] + offset)
+        slopes[i] *= -1.0/vri.n_samples
+        dvs[i] = temp
+    for dti in range(vri.n_samples): 
+        # Now we add the contribution from stars
+        for i in range(nstars):
+            kdtree_utils.kd_res_item3(ballq, &px, &py, &pz)
+            colors = <np.float64_t *> kdtree_utils.kd_res_item_data(ballq)
+            kdtree_utils.kd_res_next(ballq)
+            gexp = (px - pos[0])*(px - pos[0]) \
+                 + (py - pos[1])*(py - pos[1]) \
+                 + (pz - pos[2])*(pz - pos[2])
+            gaussian = vri.star_coeff * expl(-gexp/vri.star_sigma_num)
+            for i in range(3): im.rgba[i] += gaussian*dt*colors[i]
+        for i in range(3):
+            pos[i] += local_dds[i]
+        FIT_eval_transfer(dt, dvs, im.rgba, vri.n_fits, vri.fits,
+                          vri.field_table_ids)
+        for i in range(vc.n_fields):
+            dvs[i] += slopes[i]
+
 cdef class VolumeRenderSampler(ImageSampler):
     cdef VolumeRenderAccumulator *vra
     cdef public object tf_obj
     cdef public object my_field_tables
+    cdef kdtree_utils.kdtree **trees
     def __cinit__(self, 
                   np.ndarray vp_pos,
                   np.ndarray vp_dir,
@@ -381,7 +479,8 @@
                   np.ndarray[np.float64_t, ndim=1] x_vec,
                   np.ndarray[np.float64_t, ndim=1] y_vec,
                   np.ndarray[np.float64_t, ndim=1] width,
-                  tf_obj, n_samples = 10):
+                  tf_obj, n_samples = 10,
+                  star_list = None):
         ImageSampler.__init__(self, vp_pos, vp_dir, center, bounds, image,
                                x_vec, y_vec, width)
         cdef int i
@@ -409,9 +508,22 @@
         for i in range(6):
             self.vra.field_table_ids[i] = tf_obj.field_table_ids[i]
         self.supp_data = <void *> self.vra
+        cdef star_kdtree_container skdc
+        if star_list is None:
+            self.trees = NULL
+        else:
+            self.trees = <kdtree_utils.kdtree **> malloc(
+                sizeof(kdtree_utils.kdtree*) * len(star_list))
+            for i in range(len(star_list)):
+                skdc = star_list[i]
+                self.trees[i] = skdc.tree
 
     def setup(self, PartitionedGrid pg):
-        self.sampler = volume_render_sampler
+        if self.trees == NULL:
+            self.sampler = volume_render_sampler
+        else:
+            self.vra.star_list = self.trees[pg.parent_grid_id]
+            self.sampler = volume_render_stars_sampler
 
     def __dealloc__(self):
         return


diff -r dc749a58f12299630ecd20284a5d59dbad923c63 -r 060e702600d11d25f81f33b1aa14f2049f9a0111 yt/utilities/_amr_utils/kdtree_utils.pxd
--- a/yt/utilities/_amr_utils/kdtree_utils.pxd
+++ b/yt/utilities/_amr_utils/kdtree_utils.pxd
@@ -35,18 +35,19 @@
     void kd_free(kdtree *tree)
     
     int kd_insert3(kdtree *tree, np.float64_t x, np.float64_t y, np.float64_t z, void *data)
-    kdres *kd_nearest3(kdtree *tree, np.float64_t x, np.float64_t y, np.float64_t z)
+    kdres *kd_nearest3(kdtree *tree, np.float64_t x, np.float64_t y,
+                       np.float64_t z) nogil
 
     kdres *kd_nearest_range3(kdtree *tree, np.float64_t x, np.float64_t y, np.float64_t z,
-                             np.float64_t range)
+                             np.float64_t range) nogil
 
-    void kd_res_free(kdres *set)
-    int kd_res_size(kdres *set)
-    int kd_res_next(kdres *set)
-    void kd_res_rewind(kdres *set)
+    void kd_res_free(kdres *set) nogil
+    int kd_res_size(kdres *set) nogil
+    int kd_res_next(kdres *set) nogil
+    void kd_res_rewind(kdres *set) nogil
 
-    void kd_res_item3(kdres *set, np.float64_t *x, np.float64_t *y, np.float64_t *z)
-    void *kd_res_item_data(kdres *set)
+    void kd_res_item3(kdres *set, np.float64_t *x, np.float64_t *y,
+                      np.float64_t *z) nogil
+    void *kd_res_item_data(kdres *set) nogil
 
     void kd_data_destructor(kdtree *tree, void (*destr)(void*))
-    void *kd_res_item_data(kdres *set)


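A hedged sketch of how the new star machinery above might be fed: one star_kdtree_container per grid, since VolumeRenderSampler indexes its star_list by parent_grid_id. The particle field names and flat star colors are assumptions for illustration, not part of this diff:

    import numpy as na
    from yt.utilities._amr_utils.grid_traversal import star_kdtree_container

    def build_star_trees(pf):
        star_list = []
        for grid in pf.h.grids:
            skdc = star_kdtree_container()
            px = grid["particle_position_x"].astype("float64")  # assumed field names
            py = grid["particle_position_y"].astype("float64")
            pz = grid["particle_position_z"].astype("float64")
            colors = na.ones((px.size, 3), dtype="float64")     # flat white stars
            skdc.add_points(px, py, pz, colors)
            star_list.append(skdc)
        return star_list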

https://bitbucket.org/yt_analysis/yt/changeset/2d81eb713419/
changeset:   2d81eb713419
branch:      geometry_handling
user:        MatthewTurk
date:        2011-12-21 22:45:28
summary:     Split off the marching cubes and isocontour code; removed some imports to make
sure we're always using the new refactored versions of things.
affected #:  7 files

diff -r 060e702600d11d25f81f33b1aa14f2049f9a0111 -r 2d81eb7134195d431da234bf1fb9c8c8ccf743a9 yt/utilities/_amr_utils/fixed_interpolator.pxd
--- /dev/null
+++ b/yt/utilities/_amr_utils/fixed_interpolator.pxd
@@ -0,0 +1,43 @@
+"""
+Fixed interpolator includes
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+cimport numpy as np
+
+
+cdef extern from "FixedInterpolator.h":
+    np.float64_t fast_interpolate(int ds[3], int ci[3], np.float64_t dp[3],
+                                  np.float64_t *data) nogil
+    np.float64_t offset_interpolate(int ds[3], np.float64_t dp[3],
+                                    np.float64_t *data) nogil
+    np.float64_t trilinear_interpolate(int ds[3], int ci[3], np.float64_t dp[3],
+                                       np.float64_t *data) nogil
+    void eval_gradient(int ds[3], np.float64_t dp[3], np.float64_t *data,
+                       np.float64_t grad[3]) nogil
+    void offset_fill(int *ds, np.float64_t *data, np.float64_t *gridval) nogil
+    void vertex_interp(np.float64_t v1, np.float64_t v2, np.float64_t isovalue,
+                       np.float64_t vl[3], np.float64_t dds[3],
+                       np.float64_t x, np.float64_t y, np.float64_t z,
+                       int vind1, int vind2) nogil
+


diff -r 060e702600d11d25f81f33b1aa14f2049f9a0111 -r 2d81eb7134195d431da234bf1fb9c8c8ccf743a9 yt/utilities/_amr_utils/grid_traversal.pyx
--- a/yt/utilities/_amr_utils/grid_traversal.pyx
+++ b/yt/utilities/_amr_utils/grid_traversal.pyx
@@ -32,6 +32,7 @@
 from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip
 from field_interpolation_tables cimport \
     FieldInterpolationTable, FIT_initialize_table, FIT_eval_transfer
+from fixed_interpolator cimport *
 
 from cython.parallel import prange, parallel, threadid
 
@@ -56,21 +57,6 @@
                 int index[3],
                 void *data) nogil
 
-cdef extern from "FixedInterpolator.h":
-    np.float64_t fast_interpolate(int ds[3], int ci[3], np.float64_t dp[3],
-                                  np.float64_t *data) nogil
-    np.float64_t offset_interpolate(int ds[3], np.float64_t dp[3],
-                                    np.float64_t *data) nogil
-    np.float64_t trilinear_interpolate(int ds[3], int ci[3], np.float64_t dp[3],
-                                       np.float64_t *data) nogil
-    void eval_gradient(int ds[3], np.float64_t dp[3], np.float64_t *data,
-                       np.float64_t grad[3]) nogil
-    void offset_fill(int *ds, np.float64_t *data, np.float64_t *gridval) nogil
-    void vertex_interp(np.float64_t v1, np.float64_t v2, np.float64_t isovalue,
-                       np.float64_t vl[3], np.float64_t dds[3],
-                       np.float64_t x, np.float64_t y, np.float64_t z,
-                       int vind1, int vind2) nogil
-
 cdef struct VolumeContainer:
     int n_fields
     np.float64_t **data


diff -r 060e702600d11d25f81f33b1aa14f2049f9a0111 -r 2d81eb7134195d431da234bf1fb9c8c8ccf743a9 yt/utilities/_amr_utils/marching_cubes.pyx
--- /dev/null
+++ b/yt/utilities/_amr_utils/marching_cubes.pyx
@@ -0,0 +1,659 @@
+"""
+Marching cubes implementation
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+cimport numpy as np
+cimport cython
+import numpy as np
+from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip
+from stdlib cimport malloc, free, abs
+from fixed_interpolator cimport *
+
+cdef struct Triangle:
+    Triangle *next
+    np.float64_t p[3][3]
+    np.float64_t val
+
+cdef struct TriangleCollection:
+    int count
+    Triangle *first
+    Triangle *current
+
+cdef Triangle *AddTriangle(Triangle *self,
+                    np.float64_t p0[3], np.float64_t p1[3], np.float64_t p2[3]):
+    cdef Triangle *nn = <Triangle *> malloc(sizeof(Triangle))
+    if self != NULL:
+        self.next = nn
+    cdef int i
+    for i in range(3):
+        nn.p[0][i] = p0[i]
+    for i in range(3):
+        nn.p[1][i] = p1[i]
+    for i in range(3):
+        nn.p[2][i] = p2[i]
+    nn.next = NULL
+    return nn
+
+cdef int CountTriangles(Triangle *first):
+    cdef int count = 0
+    cdef Triangle *this = first
+    while this != NULL:
+        count += 1
+        this = this.next
+    return count
+
+cdef void FillTriangleValues(np.ndarray[np.float64_t, ndim=1] values,
+                             Triangle *first):
+    cdef Triangle *this = first
+    cdef Triangle *last
+    cdef int i = 0
+    while this != NULL:
+        values[i] = this.val
+        i += 1
+        last = this
+        this = this.next
+
+cdef void WipeTriangles(Triangle *first):
+    cdef Triangle *this = first
+    cdef Triangle *last
+    while this != NULL:
+        last = this
+        this = this.next
+        free(last)
+
+cdef void FillAndWipeTriangles(np.ndarray[np.float64_t, ndim=2] vertices,
+                               Triangle *first):
+    cdef int count = 0
+    cdef Triangle *this = first
+    cdef Triangle *last
+    cdef int i, j
+    while this != NULL:
+        for i in range(3):
+            for j in range(3):
+                vertices[count, j] = this.p[i][j]
+            count += 1 # Do it at the end because it's an index
+        last = this
+        this = this.next
+        free(last)
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef int march_cubes(
+                 np.float64_t gv[8], np.float64_t isovalue,
+                 np.float64_t dds[3],
+                 np.float64_t x, np.float64_t y, np.float64_t z,
+                 TriangleCollection *triangles):
+    cdef int *edge_table=[
+    0x0  , 0x109, 0x203, 0x30a, 0x406, 0x50f, 0x605, 0x70c,
+    0x80c, 0x905, 0xa0f, 0xb06, 0xc0a, 0xd03, 0xe09, 0xf00,
+    0x190, 0x99 , 0x393, 0x29a, 0x596, 0x49f, 0x795, 0x69c,
+    0x99c, 0x895, 0xb9f, 0xa96, 0xd9a, 0xc93, 0xf99, 0xe90,
+    0x230, 0x339, 0x33 , 0x13a, 0x636, 0x73f, 0x435, 0x53c,
+    0xa3c, 0xb35, 0x83f, 0x936, 0xe3a, 0xf33, 0xc39, 0xd30,
+    0x3a0, 0x2a9, 0x1a3, 0xaa , 0x7a6, 0x6af, 0x5a5, 0x4ac,
+    0xbac, 0xaa5, 0x9af, 0x8a6, 0xfaa, 0xea3, 0xda9, 0xca0,
+    0x460, 0x569, 0x663, 0x76a, 0x66 , 0x16f, 0x265, 0x36c,
+    0xc6c, 0xd65, 0xe6f, 0xf66, 0x86a, 0x963, 0xa69, 0xb60,
+    0x5f0, 0x4f9, 0x7f3, 0x6fa, 0x1f6, 0xff , 0x3f5, 0x2fc,
+    0xdfc, 0xcf5, 0xfff, 0xef6, 0x9fa, 0x8f3, 0xbf9, 0xaf0,
+    0x650, 0x759, 0x453, 0x55a, 0x256, 0x35f, 0x55 , 0x15c,
+    0xe5c, 0xf55, 0xc5f, 0xd56, 0xa5a, 0xb53, 0x859, 0x950,
+    0x7c0, 0x6c9, 0x5c3, 0x4ca, 0x3c6, 0x2cf, 0x1c5, 0xcc ,
+    0xfcc, 0xec5, 0xdcf, 0xcc6, 0xbca, 0xac3, 0x9c9, 0x8c0,
+    0x8c0, 0x9c9, 0xac3, 0xbca, 0xcc6, 0xdcf, 0xec5, 0xfcc,
+    0xcc , 0x1c5, 0x2cf, 0x3c6, 0x4ca, 0x5c3, 0x6c9, 0x7c0,
+    0x950, 0x859, 0xb53, 0xa5a, 0xd56, 0xc5f, 0xf55, 0xe5c,
+    0x15c, 0x55 , 0x35f, 0x256, 0x55a, 0x453, 0x759, 0x650,
+    0xaf0, 0xbf9, 0x8f3, 0x9fa, 0xef6, 0xfff, 0xcf5, 0xdfc,
+    0x2fc, 0x3f5, 0xff , 0x1f6, 0x6fa, 0x7f3, 0x4f9, 0x5f0,
+    0xb60, 0xa69, 0x963, 0x86a, 0xf66, 0xe6f, 0xd65, 0xc6c,
+    0x36c, 0x265, 0x16f, 0x66 , 0x76a, 0x663, 0x569, 0x460,
+    0xca0, 0xda9, 0xea3, 0xfaa, 0x8a6, 0x9af, 0xaa5, 0xbac,
+    0x4ac, 0x5a5, 0x6af, 0x7a6, 0xaa , 0x1a3, 0x2a9, 0x3a0,
+    0xd30, 0xc39, 0xf33, 0xe3a, 0x936, 0x83f, 0xb35, 0xa3c,
+    0x53c, 0x435, 0x73f, 0x636, 0x13a, 0x33 , 0x339, 0x230,
+    0xe90, 0xf99, 0xc93, 0xd9a, 0xa96, 0xb9f, 0x895, 0x99c,
+    0x69c, 0x795, 0x49f, 0x596, 0x29a, 0x393, 0x99 , 0x190,
+    0xf00, 0xe09, 0xd03, 0xc0a, 0xb06, 0xa0f, 0x905, 0x80c,
+    0x70c, 0x605, 0x50f, 0x406, 0x30a, 0x203, 0x109, 0x0   ]
+
+    cdef int **tri_table = \
+    [[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [0, 1, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [1, 8, 3, 9, 8, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [0, 8, 3, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [9, 2, 10, 0, 2, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [2, 8, 3, 2, 10, 8, 10, 9, 8, -1, -1, -1, -1, -1, -1, -1],
+    [3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [0, 11, 2, 8, 11, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [1, 9, 0, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [1, 11, 2, 1, 9, 11, 9, 8, 11, -1, -1, -1, -1, -1, -1, -1],
+    [3, 10, 1, 11, 10, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [0, 10, 1, 0, 8, 10, 8, 11, 10, -1, -1, -1, -1, -1, -1, -1],
+    [3, 9, 0, 3, 11, 9, 11, 10, 9, -1, -1, -1, -1, -1, -1, -1],
+    [9, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [4, 3, 0, 7, 3, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [0, 1, 9, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [4, 1, 9, 4, 7, 1, 7, 3, 1, -1, -1, -1, -1, -1, -1, -1],
+    [1, 2, 10, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [3, 4, 7, 3, 0, 4, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1],
+    [9, 2, 10, 9, 0, 2, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1],
+    [2, 10, 9, 2, 9, 7, 2, 7, 3, 7, 9, 4, -1, -1, -1, -1],
+    [8, 4, 7, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [11, 4, 7, 11, 2, 4, 2, 0, 4, -1, -1, -1, -1, -1, -1, -1],
+    [9, 0, 1, 8, 4, 7, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1],
+    [4, 7, 11, 9, 4, 11, 9, 11, 2, 9, 2, 1, -1, -1, -1, -1],
+    [3, 10, 1, 3, 11, 10, 7, 8, 4, -1, -1, -1, -1, -1, -1, -1],
+    [1, 11, 10, 1, 4, 11, 1, 0, 4, 7, 11, 4, -1, -1, -1, -1],
+    [4, 7, 8, 9, 0, 11, 9, 11, 10, 11, 0, 3, -1, -1, -1, -1],
+    [4, 7, 11, 4, 11, 9, 9, 11, 10, -1, -1, -1, -1, -1, -1, -1],
+    [9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [9, 5, 4, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [0, 5, 4, 1, 5, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [8, 5, 4, 8, 3, 5, 3, 1, 5, -1, -1, -1, -1, -1, -1, -1],
+    [1, 2, 10, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [3, 0, 8, 1, 2, 10, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1],
+    [5, 2, 10, 5, 4, 2, 4, 0, 2, -1, -1, -1, -1, -1, -1, -1],
+    [2, 10, 5, 3, 2, 5, 3, 5, 4, 3, 4, 8, -1, -1, -1, -1],
+    [9, 5, 4, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [0, 11, 2, 0, 8, 11, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1],
+    [0, 5, 4, 0, 1, 5, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1],
+    [2, 1, 5, 2, 5, 8, 2, 8, 11, 4, 8, 5, -1, -1, -1, -1],
+    [10, 3, 11, 10, 1, 3, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1],
+    [4, 9, 5, 0, 8, 1, 8, 10, 1, 8, 11, 10, -1, -1, -1, -1],
+    [5, 4, 0, 5, 0, 11, 5, 11, 10, 11, 0, 3, -1, -1, -1, -1],
+    [5, 4, 8, 5, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1],
+    [9, 7, 8, 5, 7, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [9, 3, 0, 9, 5, 3, 5, 7, 3, -1, -1, -1, -1, -1, -1, -1],
+    [0, 7, 8, 0, 1, 7, 1, 5, 7, -1, -1, -1, -1, -1, -1, -1],
+    [1, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [9, 7, 8, 9, 5, 7, 10, 1, 2, -1, -1, -1, -1, -1, -1, -1],
+    [10, 1, 2, 9, 5, 0, 5, 3, 0, 5, 7, 3, -1, -1, -1, -1],
+    [8, 0, 2, 8, 2, 5, 8, 5, 7, 10, 5, 2, -1, -1, -1, -1],
+    [2, 10, 5, 2, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1],
+    [7, 9, 5, 7, 8, 9, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1],
+    [9, 5, 7, 9, 7, 2, 9, 2, 0, 2, 7, 11, -1, -1, -1, -1],
+    [2, 3, 11, 0, 1, 8, 1, 7, 8, 1, 5, 7, -1, -1, -1, -1],
+    [11, 2, 1, 11, 1, 7, 7, 1, 5, -1, -1, -1, -1, -1, -1, -1],
+    [9, 5, 8, 8, 5, 7, 10, 1, 3, 10, 3, 11, -1, -1, -1, -1],
+    [5, 7, 0, 5, 0, 9, 7, 11, 0, 1, 0, 10, 11, 10, 0, -1],
+    [11, 10, 0, 11, 0, 3, 10, 5, 0, 8, 0, 7, 5, 7, 0, -1],
+    [11, 10, 5, 7, 11, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [0, 8, 3, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [9, 0, 1, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [1, 8, 3, 1, 9, 8, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1],
+    [1, 6, 5, 2, 6, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [1, 6, 5, 1, 2, 6, 3, 0, 8, -1, -1, -1, -1, -1, -1, -1],
+    [9, 6, 5, 9, 0, 6, 0, 2, 6, -1, -1, -1, -1, -1, -1, -1],
+    [5, 9, 8, 5, 8, 2, 5, 2, 6, 3, 2, 8, -1, -1, -1, -1],
+    [2, 3, 11, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [11, 0, 8, 11, 2, 0, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1],
+    [0, 1, 9, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1],
+    [5, 10, 6, 1, 9, 2, 9, 11, 2, 9, 8, 11, -1, -1, -1, -1],
+    [6, 3, 11, 6, 5, 3, 5, 1, 3, -1, -1, -1, -1, -1, -1, -1],
+    [0, 8, 11, 0, 11, 5, 0, 5, 1, 5, 11, 6, -1, -1, -1, -1],
+    [3, 11, 6, 0, 3, 6, 0, 6, 5, 0, 5, 9, -1, -1, -1, -1],
+    [6, 5, 9, 6, 9, 11, 11, 9, 8, -1, -1, -1, -1, -1, -1, -1],
+    [5, 10, 6, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [4, 3, 0, 4, 7, 3, 6, 5, 10, -1, -1, -1, -1, -1, -1, -1],
+    [1, 9, 0, 5, 10, 6, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1],
+    [10, 6, 5, 1, 9, 7, 1, 7, 3, 7, 9, 4, -1, -1, -1, -1],
+    [6, 1, 2, 6, 5, 1, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1],
+    [1, 2, 5, 5, 2, 6, 3, 0, 4, 3, 4, 7, -1, -1, -1, -1],
+    [8, 4, 7, 9, 0, 5, 0, 6, 5, 0, 2, 6, -1, -1, -1, -1],
+    [7, 3, 9, 7, 9, 4, 3, 2, 9, 5, 9, 6, 2, 6, 9, -1],
+    [3, 11, 2, 7, 8, 4, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1],
+    [5, 10, 6, 4, 7, 2, 4, 2, 0, 2, 7, 11, -1, -1, -1, -1],
+    [0, 1, 9, 4, 7, 8, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1],
+    [9, 2, 1, 9, 11, 2, 9, 4, 11, 7, 11, 4, 5, 10, 6, -1],
+    [8, 4, 7, 3, 11, 5, 3, 5, 1, 5, 11, 6, -1, -1, -1, -1],
+    [5, 1, 11, 5, 11, 6, 1, 0, 11, 7, 11, 4, 0, 4, 11, -1],
+    [0, 5, 9, 0, 6, 5, 0, 3, 6, 11, 6, 3, 8, 4, 7, -1],
+    [6, 5, 9, 6, 9, 11, 4, 7, 9, 7, 11, 9, -1, -1, -1, -1],
+    [10, 4, 9, 6, 4, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [4, 10, 6, 4, 9, 10, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1],
+    [10, 0, 1, 10, 6, 0, 6, 4, 0, -1, -1, -1, -1, -1, -1, -1],
+    [8, 3, 1, 8, 1, 6, 8, 6, 4, 6, 1, 10, -1, -1, -1, -1],
+    [1, 4, 9, 1, 2, 4, 2, 6, 4, -1, -1, -1, -1, -1, -1, -1],
+    [3, 0, 8, 1, 2, 9, 2, 4, 9, 2, 6, 4, -1, -1, -1, -1],
+    [0, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [8, 3, 2, 8, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1],
+    [10, 4, 9, 10, 6, 4, 11, 2, 3, -1, -1, -1, -1, -1, -1, -1],
+    [0, 8, 2, 2, 8, 11, 4, 9, 10, 4, 10, 6, -1, -1, -1, -1],
+    [3, 11, 2, 0, 1, 6, 0, 6, 4, 6, 1, 10, -1, -1, -1, -1],
+    [6, 4, 1, 6, 1, 10, 4, 8, 1, 2, 1, 11, 8, 11, 1, -1],
+    [9, 6, 4, 9, 3, 6, 9, 1, 3, 11, 6, 3, -1, -1, -1, -1],
+    [8, 11, 1, 8, 1, 0, 11, 6, 1, 9, 1, 4, 6, 4, 1, -1],
+    [3, 11, 6, 3, 6, 0, 0, 6, 4, -1, -1, -1, -1, -1, -1, -1],
+    [6, 4, 8, 11, 6, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [7, 10, 6, 7, 8, 10, 8, 9, 10, -1, -1, -1, -1, -1, -1, -1],
+    [0, 7, 3, 0, 10, 7, 0, 9, 10, 6, 7, 10, -1, -1, -1, -1],
+    [10, 6, 7, 1, 10, 7, 1, 7, 8, 1, 8, 0, -1, -1, -1, -1],
+    [10, 6, 7, 10, 7, 1, 1, 7, 3, -1, -1, -1, -1, -1, -1, -1],
+    [1, 2, 6, 1, 6, 8, 1, 8, 9, 8, 6, 7, -1, -1, -1, -1],
+    [2, 6, 9, 2, 9, 1, 6, 7, 9, 0, 9, 3, 7, 3, 9, -1],
+    [7, 8, 0, 7, 0, 6, 6, 0, 2, -1, -1, -1, -1, -1, -1, -1],
+    [7, 3, 2, 6, 7, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [2, 3, 11, 10, 6, 8, 10, 8, 9, 8, 6, 7, -1, -1, -1, -1],
+    [2, 0, 7, 2, 7, 11, 0, 9, 7, 6, 7, 10, 9, 10, 7, -1],
+    [1, 8, 0, 1, 7, 8, 1, 10, 7, 6, 7, 10, 2, 3, 11, -1],
+    [11, 2, 1, 11, 1, 7, 10, 6, 1, 6, 7, 1, -1, -1, -1, -1],
+    [8, 9, 6, 8, 6, 7, 9, 1, 6, 11, 6, 3, 1, 3, 6, -1],
+    [0, 9, 1, 11, 6, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [7, 8, 0, 7, 0, 6, 3, 11, 0, 11, 6, 0, -1, -1, -1, -1],
+    [7, 11, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [3, 0, 8, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [0, 1, 9, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [8, 1, 9, 8, 3, 1, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1],
+    [10, 1, 2, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [1, 2, 10, 3, 0, 8, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1],
+    [2, 9, 0, 2, 10, 9, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1],
+    [6, 11, 7, 2, 10, 3, 10, 8, 3, 10, 9, 8, -1, -1, -1, -1],
+    [7, 2, 3, 6, 2, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [7, 0, 8, 7, 6, 0, 6, 2, 0, -1, -1, -1, -1, -1, -1, -1],
+    [2, 7, 6, 2, 3, 7, 0, 1, 9, -1, -1, -1, -1, -1, -1, -1],
+    [1, 6, 2, 1, 8, 6, 1, 9, 8, 8, 7, 6, -1, -1, -1, -1],
+    [10, 7, 6, 10, 1, 7, 1, 3, 7, -1, -1, -1, -1, -1, -1, -1],
+    [10, 7, 6, 1, 7, 10, 1, 8, 7, 1, 0, 8, -1, -1, -1, -1],
+    [0, 3, 7, 0, 7, 10, 0, 10, 9, 6, 10, 7, -1, -1, -1, -1],
+    [7, 6, 10, 7, 10, 8, 8, 10, 9, -1, -1, -1, -1, -1, -1, -1],
+    [6, 8, 4, 11, 8, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [3, 6, 11, 3, 0, 6, 0, 4, 6, -1, -1, -1, -1, -1, -1, -1],
+    [8, 6, 11, 8, 4, 6, 9, 0, 1, -1, -1, -1, -1, -1, -1, -1],
+    [9, 4, 6, 9, 6, 3, 9, 3, 1, 11, 3, 6, -1, -1, -1, -1],
+    [6, 8, 4, 6, 11, 8, 2, 10, 1, -1, -1, -1, -1, -1, -1, -1],
+    [1, 2, 10, 3, 0, 11, 0, 6, 11, 0, 4, 6, -1, -1, -1, -1],
+    [4, 11, 8, 4, 6, 11, 0, 2, 9, 2, 10, 9, -1, -1, -1, -1],
+    [10, 9, 3, 10, 3, 2, 9, 4, 3, 11, 3, 6, 4, 6, 3, -1],
+    [8, 2, 3, 8, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1],
+    [0, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [1, 9, 0, 2, 3, 4, 2, 4, 6, 4, 3, 8, -1, -1, -1, -1],
+    [1, 9, 4, 1, 4, 2, 2, 4, 6, -1, -1, -1, -1, -1, -1, -1],
+    [8, 1, 3, 8, 6, 1, 8, 4, 6, 6, 10, 1, -1, -1, -1, -1],
+    [10, 1, 0, 10, 0, 6, 6, 0, 4, -1, -1, -1, -1, -1, -1, -1],
+    [4, 6, 3, 4, 3, 8, 6, 10, 3, 0, 3, 9, 10, 9, 3, -1],
+    [10, 9, 4, 6, 10, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [4, 9, 5, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [0, 8, 3, 4, 9, 5, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1],
+    [5, 0, 1, 5, 4, 0, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1],
+    [11, 7, 6, 8, 3, 4, 3, 5, 4, 3, 1, 5, -1, -1, -1, -1],
+    [9, 5, 4, 10, 1, 2, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1],
+    [6, 11, 7, 1, 2, 10, 0, 8, 3, 4, 9, 5, -1, -1, -1, -1],
+    [7, 6, 11, 5, 4, 10, 4, 2, 10, 4, 0, 2, -1, -1, -1, -1],
+    [3, 4, 8, 3, 5, 4, 3, 2, 5, 10, 5, 2, 11, 7, 6, -1],
+    [7, 2, 3, 7, 6, 2, 5, 4, 9, -1, -1, -1, -1, -1, -1, -1],
+    [9, 5, 4, 0, 8, 6, 0, 6, 2, 6, 8, 7, -1, -1, -1, -1],
+    [3, 6, 2, 3, 7, 6, 1, 5, 0, 5, 4, 0, -1, -1, -1, -1],
+    [6, 2, 8, 6, 8, 7, 2, 1, 8, 4, 8, 5, 1, 5, 8, -1],
+    [9, 5, 4, 10, 1, 6, 1, 7, 6, 1, 3, 7, -1, -1, -1, -1],
+    [1, 6, 10, 1, 7, 6, 1, 0, 7, 8, 7, 0, 9, 5, 4, -1],
+    [4, 0, 10, 4, 10, 5, 0, 3, 10, 6, 10, 7, 3, 7, 10, -1],
+    [7, 6, 10, 7, 10, 8, 5, 4, 10, 4, 8, 10, -1, -1, -1, -1],
+    [6, 9, 5, 6, 11, 9, 11, 8, 9, -1, -1, -1, -1, -1, -1, -1],
+    [3, 6, 11, 0, 6, 3, 0, 5, 6, 0, 9, 5, -1, -1, -1, -1],
+    [0, 11, 8, 0, 5, 11, 0, 1, 5, 5, 6, 11, -1, -1, -1, -1],
+    [6, 11, 3, 6, 3, 5, 5, 3, 1, -1, -1, -1, -1, -1, -1, -1],
+    [1, 2, 10, 9, 5, 11, 9, 11, 8, 11, 5, 6, -1, -1, -1, -1],
+    [0, 11, 3, 0, 6, 11, 0, 9, 6, 5, 6, 9, 1, 2, 10, -1],
+    [11, 8, 5, 11, 5, 6, 8, 0, 5, 10, 5, 2, 0, 2, 5, -1],
+    [6, 11, 3, 6, 3, 5, 2, 10, 3, 10, 5, 3, -1, -1, -1, -1],
+    [5, 8, 9, 5, 2, 8, 5, 6, 2, 3, 8, 2, -1, -1, -1, -1],
+    [9, 5, 6, 9, 6, 0, 0, 6, 2, -1, -1, -1, -1, -1, -1, -1],
+    [1, 5, 8, 1, 8, 0, 5, 6, 8, 3, 8, 2, 6, 2, 8, -1],
+    [1, 5, 6, 2, 1, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [1, 3, 6, 1, 6, 10, 3, 8, 6, 5, 6, 9, 8, 9, 6, -1],
+    [10, 1, 0, 10, 0, 6, 9, 5, 0, 5, 6, 0, -1, -1, -1, -1],
+    [0, 3, 8, 5, 6, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [10, 5, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [11, 5, 10, 7, 5, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [11, 5, 10, 11, 7, 5, 8, 3, 0, -1, -1, -1, -1, -1, -1, -1],
+    [5, 11, 7, 5, 10, 11, 1, 9, 0, -1, -1, -1, -1, -1, -1, -1],
+    [10, 7, 5, 10, 11, 7, 9, 8, 1, 8, 3, 1, -1, -1, -1, -1],
+    [11, 1, 2, 11, 7, 1, 7, 5, 1, -1, -1, -1, -1, -1, -1, -1],
+    [0, 8, 3, 1, 2, 7, 1, 7, 5, 7, 2, 11, -1, -1, -1, -1],
+    [9, 7, 5, 9, 2, 7, 9, 0, 2, 2, 11, 7, -1, -1, -1, -1],
+    [7, 5, 2, 7, 2, 11, 5, 9, 2, 3, 2, 8, 9, 8, 2, -1],
+    [2, 5, 10, 2, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1],
+    [8, 2, 0, 8, 5, 2, 8, 7, 5, 10, 2, 5, -1, -1, -1, -1],
+    [9, 0, 1, 5, 10, 3, 5, 3, 7, 3, 10, 2, -1, -1, -1, -1],
+    [9, 8, 2, 9, 2, 1, 8, 7, 2, 10, 2, 5, 7, 5, 2, -1],
+    [1, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [0, 8, 7, 0, 7, 1, 1, 7, 5, -1, -1, -1, -1, -1, -1, -1],
+    [9, 0, 3, 9, 3, 5, 5, 3, 7, -1, -1, -1, -1, -1, -1, -1],
+    [9, 8, 7, 5, 9, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [5, 8, 4, 5, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1],
+    [5, 0, 4, 5, 11, 0, 5, 10, 11, 11, 3, 0, -1, -1, -1, -1],
+    [0, 1, 9, 8, 4, 10, 8, 10, 11, 10, 4, 5, -1, -1, -1, -1],
+    [10, 11, 4, 10, 4, 5, 11, 3, 4, 9, 4, 1, 3, 1, 4, -1],
+    [2, 5, 1, 2, 8, 5, 2, 11, 8, 4, 5, 8, -1, -1, -1, -1],
+    [0, 4, 11, 0, 11, 3, 4, 5, 11, 2, 11, 1, 5, 1, 11, -1],
+    [0, 2, 5, 0, 5, 9, 2, 11, 5, 4, 5, 8, 11, 8, 5, -1],
+    [9, 4, 5, 2, 11, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [2, 5, 10, 3, 5, 2, 3, 4, 5, 3, 8, 4, -1, -1, -1, -1],
+    [5, 10, 2, 5, 2, 4, 4, 2, 0, -1, -1, -1, -1, -1, -1, -1],
+    [3, 10, 2, 3, 5, 10, 3, 8, 5, 4, 5, 8, 0, 1, 9, -1],
+    [5, 10, 2, 5, 2, 4, 1, 9, 2, 9, 4, 2, -1, -1, -1, -1],
+    [8, 4, 5, 8, 5, 3, 3, 5, 1, -1, -1, -1, -1, -1, -1, -1],
+    [0, 4, 5, 1, 0, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [8, 4, 5, 8, 5, 3, 9, 0, 5, 0, 3, 5, -1, -1, -1, -1],
+    [9, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [4, 11, 7, 4, 9, 11, 9, 10, 11, -1, -1, -1, -1, -1, -1, -1],
+    [0, 8, 3, 4, 9, 7, 9, 11, 7, 9, 10, 11, -1, -1, -1, -1],
+    [1, 10, 11, 1, 11, 4, 1, 4, 0, 7, 4, 11, -1, -1, -1, -1],
+    [3, 1, 4, 3, 4, 8, 1, 10, 4, 7, 4, 11, 10, 11, 4, -1],
+    [4, 11, 7, 9, 11, 4, 9, 2, 11, 9, 1, 2, -1, -1, -1, -1],
+    [9, 7, 4, 9, 11, 7, 9, 1, 11, 2, 11, 1, 0, 8, 3, -1],
+    [11, 7, 4, 11, 4, 2, 2, 4, 0, -1, -1, -1, -1, -1, -1, -1],
+    [11, 7, 4, 11, 4, 2, 8, 3, 4, 3, 2, 4, -1, -1, -1, -1],
+    [2, 9, 10, 2, 7, 9, 2, 3, 7, 7, 4, 9, -1, -1, -1, -1],
+    [9, 10, 7, 9, 7, 4, 10, 2, 7, 8, 7, 0, 2, 0, 7, -1],
+    [3, 7, 10, 3, 10, 2, 7, 4, 10, 1, 10, 0, 4, 0, 10, -1],
+    [1, 10, 2, 8, 7, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [4, 9, 1, 4, 1, 7, 7, 1, 3, -1, -1, -1, -1, -1, -1, -1],
+    [4, 9, 1, 4, 1, 7, 0, 8, 1, 8, 7, 1, -1, -1, -1, -1],
+    [4, 0, 3, 7, 4, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [4, 8, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [9, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [3, 0, 9, 3, 9, 11, 11, 9, 10, -1, -1, -1, -1, -1, -1, -1],
+    [0, 1, 10, 0, 10, 8, 8, 10, 11, -1, -1, -1, -1, -1, -1, -1],
+    [3, 1, 10, 11, 3, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [1, 2, 11, 1, 11, 9, 9, 11, 8, -1, -1, -1, -1, -1, -1, -1],
+    [3, 0, 9, 3, 9, 11, 1, 2, 9, 2, 11, 9, -1, -1, -1, -1],
+    [0, 2, 11, 8, 0, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [3, 2, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [2, 3, 8, 2, 8, 10, 10, 8, 9, -1, -1, -1, -1, -1, -1, -1],
+    [9, 10, 2, 0, 9, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [2, 3, 8, 2, 8, 10, 0, 1, 8, 1, 10, 8, -1, -1, -1, -1],
+    [1, 10, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [1, 3, 8, 9, 1, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [0, 9, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [0, 3, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1]]
+    cdef np.float64_t vertlist[12][3]
+    cdef int cubeindex = 0
+    cdef int n
+    cdef int nt = 0
+    for n in range(8):
+        if gv[n] < isovalue:
+            cubeindex |= (1 << n)
+    if edge_table[cubeindex] == 0:
+        return 0
+    if (edge_table[cubeindex] & 1): # 0,0,0 with 1,0,0
+        vertex_interp(gv[0], gv[1], isovalue, vertlist[0],
+                      dds, x, y, z, 0, 1)
+    if (edge_table[cubeindex] & 2): # 1,0,0 with 1,1,0
+        vertex_interp(gv[1], gv[2], isovalue, vertlist[1],
+                      dds, x, y, z, 1, 2)
+    if (edge_table[cubeindex] & 4): # 1,1,0 with 0,1,0
+        vertex_interp(gv[2], gv[3], isovalue, vertlist[2],
+                      dds, x, y, z, 2, 3)
+    if (edge_table[cubeindex] & 8): # 0,1,0 with 0,0,0
+        vertex_interp(gv[3], gv[0], isovalue, vertlist[3],
+                      dds, x, y, z, 3, 0)
+    if (edge_table[cubeindex] & 16): # 0,0,1 with 1,0,1
+        vertex_interp(gv[4], gv[5], isovalue, vertlist[4],
+                      dds, x, y, z, 4, 5)
+    if (edge_table[cubeindex] & 32): # 1,0,1 with 1,1,1
+        vertex_interp(gv[5], gv[6], isovalue, vertlist[5],
+                      dds, x, y, z, 5, 6)
+    if (edge_table[cubeindex] & 64): # 1,1,1 with 0,1,1
+        vertex_interp(gv[6], gv[7], isovalue, vertlist[6],
+                      dds, x, y, z, 6, 7)
+    if (edge_table[cubeindex] & 128): # 0,1,1 with 0,0,1
+        vertex_interp(gv[7], gv[4], isovalue, vertlist[7],
+                      dds, x, y, z, 7, 4)
+    if (edge_table[cubeindex] & 256): # 0,0,0 with 0,0,1
+        vertex_interp(gv[0], gv[4], isovalue, vertlist[8],
+                      dds, x, y, z, 0, 4)
+    if (edge_table[cubeindex] & 512): # 1,0,0 with 1,0,1
+        vertex_interp(gv[1], gv[5], isovalue, vertlist[9],
+                      dds, x, y, z, 1, 5)
+    if (edge_table[cubeindex] & 1024): # 1,1,0 with 1,1,1
+        vertex_interp(gv[2], gv[6], isovalue, vertlist[10],
+                      dds, x, y, z, 2, 6)
+    if (edge_table[cubeindex] & 2048): # 0,1,0 with 0,1,1
+        vertex_interp(gv[3], gv[7], isovalue, vertlist[11],
+                      dds, x, y, z, 3, 7)
+    n = 0
+    while 1:
+        triangles.current = AddTriangle(triangles.current,
+                    vertlist[tri_table[cubeindex][n  ]],
+                    vertlist[tri_table[cubeindex][n+1]],
+                    vertlist[tri_table[cubeindex][n+2]])
+        triangles.count += 1
+        nt += 1
+        if triangles.first == NULL:
+            triangles.first = triangles.current
+        n += 3
+        if tri_table[cubeindex][n] == -1: break
+    return nt
+    
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def march_cubes_grid(np.float64_t isovalue,
+                     np.ndarray[np.float64_t, ndim=3] values,
+                     np.ndarray[np.int32_t, ndim=3] mask,
+                     np.ndarray[np.float64_t, ndim=1] left_edge,
+                     np.ndarray[np.float64_t, ndim=1] dxs,
+                     obj_sample = None):
+    cdef int dims[3]
+    cdef int i, j, k, n, m, nt
+    cdef int offset
+    cdef np.float64_t gv[8], pos[3], point[3], idds[3]
+    cdef np.float64_t *intdata = NULL
+    cdef np.float64_t *sdata = NULL
+    cdef np.float64_t x, y, z, do_sample
+    cdef np.ndarray[np.float64_t, ndim=3] sample
+    cdef np.ndarray[np.float64_t, ndim=1] sampled
+    cdef TriangleCollection triangles
+    cdef Triangle *last, *current
+    if obj_sample is not None:
+        sample = obj_sample
+        sdata = <np.float64_t *> sample.data
+        do_sample = 1
+    else:
+        do_sample = 0
+    for i in range(3):
+        dims[i] = values.shape[i] - 1
+        idds[i] = 1.0 / dxs[i]
+    triangles.first = triangles.current = NULL
+    last = current = NULL
+    triangles.count = 0
+    cdef np.float64_t *data = <np.float64_t *> values.data
+    cdef np.float64_t *dds = <np.float64_t *> dxs.data
+    pos[0] = left_edge[0]
+    for i in range(dims[0]):
+        pos[1] = left_edge[1]
+        for j in range(dims[1]):
+            pos[2] = left_edge[2]
+            for k in range(dims[2]):
+                if mask[i,j,k] == 1:
+                    offset = i * (dims[1] + 1) * (dims[2] + 1) \
+                           + j * (dims[2] + 1) + k
+                    intdata = data + offset
+                    offset_fill(dims, intdata, gv)
+                    nt = march_cubes(gv, isovalue, dds, pos[0], pos[1], pos[2],
+                                &triangles)
+                    if do_sample == 1 and nt > 0:
+                        # At each triangle's center, sample our secondary field
+                        if last == NULL and triangles.first != NULL:
+                            current = triangles.first
+                            last = NULL
+                        elif last != NULL:
+                            current = last.next
+                        while current != NULL:
+                            for n in range(3):
+                                point[n] = 0.0
+                            for n in range(3):
+                                for m in range(3):
+                                    point[m] += (current.p[n][m]-pos[m])*idds[m]
+                            for n in range(3):
+                                point[n] /= 3.0
+                            current.val = offset_interpolate(dims, point,
+                                                             sdata + offset)
+                            last = current
+                            if current.next == NULL: break
+                            current = current.next
+                pos[2] += dds[2]
+            pos[1] += dds[1]
+        pos[0] += dds[0]
+    # Hallo, we are all done.
+    cdef np.ndarray[np.float64_t, ndim=2] vertices 
+    vertices = np.zeros((triangles.count*3,3), dtype='float64')
+    if do_sample == 1:
+        sampled = np.zeros(triangles.count, dtype='float64')
+        FillTriangleValues(sampled, triangles.first)
+        FillAndWipeTriangles(vertices, triangles.first)
+        return vertices, sampled
+    FillAndWipeTriangles(vertices, triangles.first)
+    return vertices
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def march_cubes_grid_flux(
+                     np.float64_t isovalue,
+                     np.ndarray[np.float64_t, ndim=3] values,
+                     np.ndarray[np.float64_t, ndim=3] v1,
+                     np.ndarray[np.float64_t, ndim=3] v2,
+                     np.ndarray[np.float64_t, ndim=3] v3,
+                     np.ndarray[np.float64_t, ndim=3] flux_field,
+                     np.ndarray[np.int32_t, ndim=3] mask,
+                     np.ndarray[np.float64_t, ndim=1] left_edge,
+                     np.ndarray[np.float64_t, ndim=1] dxs):
+    cdef int dims[3]
+    cdef int i, j, k, n, m
+    cdef int offset
+    cdef np.float64_t gv[8]
+    cdef np.float64_t *intdata = NULL
+    cdef TriangleCollection triangles
+    cdef Triangle *current = NULL
+    cdef Triangle *last = NULL
+    cdef np.float64_t *data = <np.float64_t *> values.data
+    cdef np.float64_t *v1data = <np.float64_t *> v1.data
+    cdef np.float64_t *v2data = <np.float64_t *> v2.data
+    cdef np.float64_t *v3data = <np.float64_t *> v3.data
+    cdef np.float64_t *fdata = <np.float64_t *> flux_field.data
+    cdef np.float64_t *dds = <np.float64_t *> dxs.data
+    cdef np.float64_t flux = 0.0
+    cdef np.float64_t center[3], point[3], wval, temp, area, s
+    cdef np.float64_t cell_pos[3], fv[3], idds[3], normal[3]
+    for i in range(3):
+        dims[i] = values.shape[i] - 1
+        idds[i] = 1.0 / dds[i]
+    triangles.first = triangles.current = NULL
+    triangles.count = 0
+    cell_pos[0] = left_edge[0]
+    for i in range(dims[0]):
+        cell_pos[1] = left_edge[1]
+        for j in range(dims[1]):
+            cell_pos[2] = left_edge[2]
+            for k in range(dims[2]):
+                if mask[i,j,k] == 1:
+                    offset = i * (dims[1] + 1) * (dims[2] + 1) \
+                           + j * (dims[2] + 1) + k
+                    intdata = data + offset
+                    offset_fill(dims, intdata, gv)
+                    march_cubes(gv, isovalue, dds,
+                                cell_pos[0], cell_pos[1], cell_pos[2],
+                                &triangles)
+                    # Now our triangles collection has a bunch.  We now
+                    # calculate fluxes for each.
+                    if last == NULL and triangles.first != NULL:
+                        current = triangles.first
+                        last = NULL
+                    elif last != NULL:
+                        current = last.next
+                    while current != NULL:
+                        # Calculate the center of the triangle
+                        wval = 0.0
+                        for n in range(3):
+                            center[n] = 0.0
+                        for n in range(3):
+                            for m in range(3):
+                                point[m] = (current.p[n][m]-cell_pos[m])*idds[m]
+                            # Now we calculate the value at this point
+                            temp = offset_interpolate(dims, point, intdata)
+                            #print "something", temp, point[0], point[1], point[2]
+                            wval += temp
+                            for m in range(3):
+                                center[m] += temp * point[m]
+                        # Now we divide by our normalizing factor
+                        for n in range(3):
+                            center[n] /= wval
+                        # We have our center point of the triangle, in 0..1
+                        # coordinates.  So now we interpolate our three
+                        # fields.
+                        fv[0] = offset_interpolate(dims, center, v1data + offset)
+                        fv[1] = offset_interpolate(dims, center, v2data + offset)
+                        fv[2] = offset_interpolate(dims, center, v3data + offset)
+                        # We interpolate again the actual value data
+                        wval = offset_interpolate(dims, center, fdata + offset)
+                        # Now we have our flux vector and our field value!
+                        # We just need a normal vector with which we can
+                        # dot it.  The normal should be equal to the gradient
+                        # in the center of the triangle, or thereabouts.
+                        eval_gradient(dims, center, intdata, normal)
+                        temp = 0.0
+                        for n in range(3):
+                            temp += normal[n]*normal[n]
+                        # Take the negative, to ensure it points inwardly
+                        temp = -(temp**0.5)
+                        # Dump this somewhere for now
+                        temp = wval * (fv[0] * normal[0] +
+                                       fv[1] * normal[1] +
+                                       fv[2] * normal[2])/temp
+                        # Now we need the area of the triangle.  This will take
+                        # a lot of time to calculate compared to the rest.
+                        # We use Heron's formula.
+                        for n in range(3):
+                            fv[n] = 0.0
+                        for n in range(3):
+                            fv[0] += (current.p[0][n] - current.p[2][n])**2.0
+                            fv[1] += (current.p[1][n] - current.p[0][n])**2.0
+                            fv[2] += (current.p[2][n] - current.p[1][n])**2.0
+                        s = 0.0
+                        for n in range(3):
+                            fv[n] = fv[n]**0.5
+                            s += 0.5 * fv[n]
+                        area = (s*(s-fv[0])*(s-fv[1])*(s-fv[2]))
+                        area = area**0.5
+                        flux += temp*area
+                        last = current
+                        if current.next == NULL: break
+                        current = current.next
+                cell_pos[2] += dds[2]
+            cell_pos[1] += dds[1]
+        cell_pos[0] += dds[0]
+    # Hallo, we are all done.
+    WipeTriangles(triangles.first)
+    return flux
+
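For reference, the per-triangle arithmetic in march_cubes_grid_flux above boils down to a short expression: the interpolated field value times the component of the interpolated velocity along the (negated, normalized) gradient, weighted by the triangle area from Heron's formula. A hedged restatement as a hypothetical pure-Python helper, purely for illustration:

import math

def triangle_flux(wval, v, normal, edge_lengths):
    # wval: interpolated field value at the triangle center
    # v: interpolated velocity vector; normal: gradient of the contoured field
    # edge_lengths: the three triangle edge lengths
    a, b, c = edge_lengths
    nmag = -math.sqrt(sum(n * n for n in normal))  # negated, as in the loop above
    s = 0.5 * (a + b + c)
    area = math.sqrt(s * (s - a) * (s - b) * (s - c))  # Heron's formula
    return wval * sum(vi * ni for vi, ni in zip(v, normal)) / nmag * area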

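And a minimal sketch of driving the new module from Python once it is built; the import path and argument shapes follow the diffs in this changeset, while the spherical test field is invented for illustration:

import numpy as np
from yt.utilities.amr_utils import march_cubes_grid

nv = 9  # vertex-centered data: one more sample than cells along each axis
x, y, z = np.mgrid[0:1:nv*1j, 0:1:nv*1j, 0:1:nv*1j]
values = np.sqrt((x - 0.5)**2 + (y - 0.5)**2 + (z - 0.5)**2)  # float64 field
mask = np.ones((nv - 1, nv - 1, nv - 1), dtype="int32")       # one flag per cell
left_edge = np.zeros(3, dtype="float64")
dxs = np.ones(3, dtype="float64") / (nv - 1)

# Returns a (3 * n_triangles, 3) array of isosurface vertices; passing an
# obj_sample array additionally returns one sampled value per triangle.
vertices = march_cubes_grid(0.25, values, mask, left_edge, dxs)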

diff -r 060e702600d11d25f81f33b1aa14f2049f9a0111 -r 2d81eb7134195d431da234bf1fb9c8c8ccf743a9 yt/utilities/_amr_utils/setup.py
--- a/yt/utilities/_amr_utils/setup.py
+++ b/yt/utilities/_amr_utils/setup.py
@@ -132,6 +132,14 @@
     config.add_extension("Interpolators", 
                 ["yt/utilities/_amr_utils/Interpolators.pyx"],
                 libraries=["m"], depends=["yt/utilities/_amr_utils/fp_utils.pxd"])
+    config.add_extension("marching_cubes", 
+                ["yt/utilities/_amr_utils/marching_cubes.pyx",
+                 "yt/utilities/_amr_utils/FixedInterpolator.c"],
+                libraries=["m"],
+                depends=["yt/utilities/_amr_utils/fp_utils.pxd",
+                         "yt/utilities/_amr_utils/fixed_interpolator.pxd",
+                         "yt/utilities/_amr_utils/FixedInterpolator.h",
+                ])
     config.add_extension("misc_utilities", 
                 ["yt/utilities/_amr_utils/misc_utilities.pyx"],
                 libraries=["m"], depends=["yt/utilities/_amr_utils/fp_utils.pxd"])
@@ -177,14 +185,17 @@
           )
     config.add_extension("grid_traversal", 
                ["yt/utilities/_amr_utils/grid_traversal.pyx",
-                "yt/utilities/_amr_utils/FixedInterpolator.c"],
+                "yt/utilities/_amr_utils/FixedInterpolator.c",
+                "yt/utilities/_amr_utils/kdtree.c"],
                include_dirs=["yt/utilities/_amr_utils/"],
                libraries=["m"], 
                extra_compile_args=['-fopenmp'],
                extra_link_args=['-fopenmp'],
                depends = ["yt/utilities/_amr_utils/VolumeIntegrator.pyx",
                           "yt/utilities/_amr_utils/fp_utils.pxd",
+                          "yt/utilities/_amr_utils/kdtree.h",
                           "yt/utilities/_amr_utils/FixedInterpolator.h",
+                          "yt/utilities/_amr_utils/fixed_interpolator.pxd",
                           ]
           )
     if os.environ.get("GPERFTOOLS", "no").upper() != "NO":


diff -r 060e702600d11d25f81f33b1aa14f2049f9a0111 -r 2d81eb7134195d431da234bf1fb9c8c8ccf743a9 yt/utilities/amr_utils.py
--- a/yt/utilities/amr_utils.py
+++ b/yt/utilities/amr_utils.py
@@ -36,4 +36,6 @@
 from ._amr_utils.PointsInVolume import *
 from ._amr_utils.QuadTree import *
 from ._amr_utils.RayIntegrators import *
-from ._amr_utils.VolumeIntegrator import *
+from ._amr_utils.grid_traversal import *
+from ._amr_utils.marching_cubes import *
+#from ._amr_utils.VolumeIntegrator import *


diff -r 060e702600d11d25f81f33b1aa14f2049f9a0111 -r 2d81eb7134195d431da234bf1fb9c8c8ccf743a9 yt/visualization/volume_rendering/api.py
--- a/yt/visualization/volume_rendering/api.py
+++ b/yt/visualization/volume_rendering/api.py
@@ -32,8 +32,6 @@
                              PlanckTransferFunction, \
                              MultiVariateTransferFunction, \
                              ProjectionTransferFunction
-from yt.utilities.amr_utils import PartitionedGrid, VectorPlane, \
-    TransferFunctionProxy
 from grid_partitioner import HomogenizedVolume, \
                              export_partitioned_grids, \
                              import_partitioned_grids


diff -r 060e702600d11d25f81f33b1aa14f2049f9a0111 -r 2d81eb7134195d431da234bf1fb9c8c8ccf743a9 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -30,9 +30,9 @@
 from .grid_partitioner import HomogenizedVolume
 from .transfer_functions import ProjectionTransferFunction
 
-from yt.utilities.amr_utils import TransferFunctionProxy, VectorPlane, \
-    arr_vec2pix_nest, arr_pix2vec_nest, AdaptiveRaySource, \
-    arr_ang2pix_nest
+#from yt.utilities.amr_utils import \
+#    arr_vec2pix_nest, arr_pix2vec_nest, AdaptiveRaySource, \
+#    arr_ang2pix_nest
 from yt.visualization.image_writer import write_bitmap
 from yt.data_objects.data_containers import data_object_registry
 from yt.utilities.parallel_tools.parallel_analysis_interface import \



https://bitbucket.org/yt_analysis/yt/changeset/de26d4dcfc5c/
changeset:   de26d4dcfc5c
branch:      yt
user:        MatthewTurk
date:        2011-12-21 22:45:49
summary:     Merging in latest changes.  They probably should have been in this branch.
affected #:  10 files

diff -r d91a8b84ec5ab4eeedda6502baf307da4b7eca12 -r de26d4dcfc5c656de04f6b2279ba29ca0d129a6a yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -41,7 +41,8 @@
 from yt.utilities.amr_utils import find_grids_in_inclined_box, \
     grid_points_in_volume, planar_points_in_volume, VoxelTraversal, \
     QuadTree, get_box_grids_below_level, ghost_zone_interpolate, \
-    march_cubes_grid, march_cubes_grid_flux, ortho_ray_grids, ray_grids
+    march_cubes_grid, march_cubes_grid_flux, ortho_ray_grids, ray_grids, \
+    slice_grids, cutting_plane_grids, cutting_plane_cells
 from yt.utilities.data_point_utilities import CombineGrids, \
     DataCubeRefine, DataCubeReplace, FillRegion, FillBuffer
 from yt.utilities.definitions import axis_names, x_dict, y_dict
@@ -995,9 +996,10 @@
         self.ActiveDimensions = (t.shape[1], 1, 1)
 
     def _get_list_of_grids(self):
-        goodI = ((self.pf.h.grid_right_edge[:,self.axis] > self.coord)
-              &  (self.pf.h.grid_left_edge[:,self.axis] <= self.coord ))
-        self._grids = self.pf.h.grids[goodI] # Using sources not hierarchy
+        gi = slice_grids(self, 
+                self.hierarchy.grid_left_edge,
+                self.hierarchy.grid_right_edge)
+        self._grids = self.hierarchy.grids[gi.astype("bool")]
 
     def __cut_mask_child_mask(self, grid):
         mask = grid.child_mask.copy()
@@ -1150,25 +1152,9 @@
         return self._norm_vec
 
     def _get_list_of_grids(self):
-        # Recall that the projection of the distance vector from a point
-        # onto the normal vector of a plane is:
-        # D = (a x_0 + b y_0 + c z_0 + d)/sqrt(a^2+b^2+c^2)
-        # @todo: Convert to using corners
-        LE = self.pf.h.grid_left_edge
-        RE = self.pf.h.grid_right_edge
-        vertices = na.array([[LE[:,0],LE[:,1],LE[:,2]],
-                             [RE[:,0],RE[:,1],RE[:,2]],
-                             [LE[:,0],LE[:,1],RE[:,2]],
-                             [RE[:,0],RE[:,1],LE[:,2]],
-                             [LE[:,0],RE[:,1],RE[:,2]],
-                             [RE[:,0],LE[:,1],LE[:,2]],
-                             [LE[:,0],RE[:,1],LE[:,2]],
-                             [RE[:,0],LE[:,1],RE[:,2]]])
-        # This gives us shape: 8, 3, n_grid
-        D = na.sum(self._norm_vec.reshape((1,3,1)) * vertices, axis=1) + self._d
-        self.D = D
-        self._grids = self.hierarchy.grids[
-            na.where(na.logical_not(na.all(D<0,axis=0) | na.all(D>0,axis=0) )) ]
+        gridi = cutting_plane_grids(self, self.pf.h.grid_left_edge,
+                                          self.pf.h.grid_right_edge)
+        self._grids = self.hierarchy.grids[gridi.astype("bool")]
 
     @cache_mask
     def _get_cut_mask(self, grid):


diff -r d91a8b84ec5ab4eeedda6502baf307da4b7eca12 -r de26d4dcfc5c656de04f6b2279ba29ca0d129a6a yt/utilities/_amr_utils/fixed_interpolator.pxd
--- /dev/null
+++ b/yt/utilities/_amr_utils/fixed_interpolator.pxd
@@ -0,0 +1,43 @@
+"""
+Fixed interpolator includes
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+cimport numpy as np
+
+
+cdef extern from "FixedInterpolator.h":
+    np.float64_t fast_interpolate(int ds[3], int ci[3], np.float64_t dp[3],
+                                  np.float64_t *data) nogil
+    np.float64_t offset_interpolate(int ds[3], np.float64_t dp[3],
+                                    np.float64_t *data) nogil
+    np.float64_t trilinear_interpolate(int ds[3], int ci[3], np.float64_t dp[3],
+                                       np.float64_t *data) nogil
+    void eval_gradient(int ds[3], np.float64_t dp[3], np.float64_t *data,
+                       np.float64_t grad[3]) nogil
+    void offset_fill(int *ds, np.float64_t *data, np.float64_t *gridval) nogil
+    void vertex_interp(np.float64_t v1, np.float64_t v2, np.float64_t isovalue,
+                       np.float64_t vl[3], np.float64_t dds[3],
+                       np.float64_t x, np.float64_t y, np.float64_t z,
+                       int vind1, int vind2) nogil
+


diff -r d91a8b84ec5ab4eeedda6502baf307da4b7eca12 -r de26d4dcfc5c656de04f6b2279ba29ca0d129a6a yt/utilities/_amr_utils/geometry_utils.pyx
--- a/yt/utilities/_amr_utils/geometry_utils.pyx
+++ b/yt/utilities/_amr_utils/geometry_utils.pyx
@@ -26,7 +26,18 @@
 import numpy as np
 cimport numpy as np
 cimport cython
-from stdlib cimport malloc, free, abs
+from stdlib cimport malloc, free
+
+cdef extern from "math.h":
+    double exp(double x) nogil
+    float expf(float x) nogil
+    long double expl(long double x) nogil
+    double floor(double x) nogil
+    double ceil(double x) nogil
+    double fmod(double x, double y) nogil
+    double log2(double x) nogil
+    long int lrint(double x) nogil
+    double fabs(double x) nogil
 
 # These routines are separated into a couple different categories:
 #
@@ -110,6 +121,91 @@
             continue
     return gridi
 
+def slice_grids(dobj, np.ndarray[np.float64_t, ndim=2] left_edges,
+                      np.ndarray[np.float64_t, ndim=2] right_edges):
+    cdef int i, ax
+    cdef int ng = left_edges.shape[0]
+    cdef np.ndarray[np.int32_t, ndim=1] gridi = np.zeros(ng, dtype='int32')
+    ax = dobj.axis
+    cdef np.float64_t coord = dobj.coord
+    for i in range(ng):
+        if left_edges[i, ax] <= coord and \
+           right_edges[i, ax] > coord:
+            gridi[i] = 1
+    return gridi
+
+def cutting_plane_grids(dobj, np.ndarray[np.float64_t, ndim=2] left_edges,
+                        np.ndarray[np.float64_t, ndim=2] right_edges):
+    cdef int i
+    cdef int ng = left_edges.shape[0]
+    cdef np.ndarray[np.int32_t, ndim=1] gridi = np.zeros(ng, dtype='int32')
+    cdef np.float64_t *arr[2]
+    arr[0] = <np.float64_t *> left_edges.data
+    arr[1] = <np.float64_t *> right_edges.data
+    cdef np.float64_t x, y, z
+    cdef np.float64_t norm_vec[3]
+    cdef np.float64_t d = dobj._d # offset to center
+    cdef np.float64_t gd # offset to center
+    cdef np.int64_t all_under, all_over
+    for i in range(3):
+        norm_vec[i] = dobj._norm_vec[i]
+    for i in range(ng):
+        all_under = 1
+        all_over = 1
+        # Check each corner
+        for xi in range(2):
+            x = arr[xi][i * 3 + 0]
+            for yi in range(2):
+                y = arr[yi][i * 3 + 1]
+                for zi in range(2):
+                    z = arr[zi][i * 3 + 2]
+                    gd = ( x*norm_vec[0]
+                         + y*norm_vec[1]
+                         + z*norm_vec[2]) + d
+                    if gd <= 0: all_over = 0
+                    if gd >= 0: all_under = 0
+        if not (all_over == 1 or all_under == 1):
+            gridi[i] = 1
+    return gridi
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef inline int cutting_plane_cell(
+                        np.float64_t x, np.float64_t y, np.float64_t z,
+                        np.float64_t norm_vec[3], np.float64_t d,
+                        np.float64_t dist):
+    cdef np.float64_t cd = x*norm_vec[0] + y*norm_vec[1] + z*norm_vec[2] + d
+    if fabs(cd) <= dist: return 1
+    return 0
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def cutting_plane_cells(dobj, gobj):
+    cdef np.ndarray[np.int32_t, ndim=3] mask 
+    cdef np.ndarray[np.float64_t, ndim=1] left_edge = gobj.LeftEdge
+    cdef np.ndarray[np.float64_t, ndim=1] dds = gobj.dds
+    cdef int i, j, k
+    cdef np.float64_t x, y, z, dist
+    cdef np.float64_t norm_vec[3]
+    cdef np.float64_t d = dobj._d
+
+    mask = np.zeros(gobj.ActiveDimensions, dtype='int32')
+    for i in range(3): norm_vec[i] = dobj._norm_vec[i]
+    dist = 0.5*(dds[0]*dds[0] + dds[1]*dds[1] + dds[2]*dds[2])**0.5
+    x = left_edge[0] + dds[0] * 0.5
+    for i in range(mask.shape[0]):
+        y = left_edge[1] + dds[1] * 0.5
+        for j in range(mask.shape[1]):
+            z = left_edge[2] + dds[2] * 0.5
+            for k in range(mask.shape[2]):
+                mask[i,j,k] = cutting_plane_cell(x, y, z, norm_vec, d, dist)
+                z += dds[2]
+            y += dds[1]
+        x += dds[0]
+    return mask
+                
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)

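To restate the geometric test used by cutting_plane_grids and cutting_plane_cells above: a grid is kept when its eight corners do not all fall on the same side of the plane n.x + d = 0, and a cell is kept when the signed distance of its center from the plane is at most half the cell diagonal. A small hedged sketch with a hypothetical helper name, assuming (as the code does) that the normal is already unit length:

import math

def cell_intersects_plane(center, norm_vec, d, dds):
    # Half the cell diagonal, the same `dist` the Cython code precomputes.
    dist = 0.5 * math.sqrt(sum(dd * dd for dd in dds))
    # Signed point-plane distance of the cell center.
    cd = sum(c * n for c, n in zip(center, norm_vec)) + d
    return abs(cd) <= dist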

diff -r d91a8b84ec5ab4eeedda6502baf307da4b7eca12 -r de26d4dcfc5c656de04f6b2279ba29ca0d129a6a yt/utilities/_amr_utils/grid_traversal.pyx
--- a/yt/utilities/_amr_utils/grid_traversal.pyx
+++ b/yt/utilities/_amr_utils/grid_traversal.pyx
@@ -32,6 +32,7 @@
 from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip
 from field_interpolation_tables cimport \
     FieldInterpolationTable, FIT_initialize_table, FIT_eval_transfer
+from fixed_interpolator cimport *
 
 from cython.parallel import prange, parallel, threadid
 
@@ -56,21 +57,6 @@
                 int index[3],
                 void *data) nogil
 
-cdef extern from "FixedInterpolator.h":
-    np.float64_t fast_interpolate(int ds[3], int ci[3], np.float64_t dp[3],
-                                  np.float64_t *data) nogil
-    np.float64_t offset_interpolate(int ds[3], np.float64_t dp[3],
-                                    np.float64_t *data) nogil
-    np.float64_t trilinear_interpolate(int ds[3], int ci[3], np.float64_t dp[3],
-                                       np.float64_t *data) nogil
-    void eval_gradient(int ds[3], np.float64_t dp[3], np.float64_t *data,
-                       np.float64_t grad[3]) nogil
-    void offset_fill(int *ds, np.float64_t *data, np.float64_t *gridval) nogil
-    void vertex_interp(np.float64_t v1, np.float64_t v2, np.float64_t isovalue,
-                       np.float64_t vl[3], np.float64_t dds[3],
-                       np.float64_t x, np.float64_t y, np.float64_t z,
-                       int vind1, int vind2) nogil
-
 cdef struct VolumeContainer:
     int n_fields
     np.float64_t **data
@@ -84,6 +70,7 @@
     cdef public object my_data
     cdef public object LeftEdge
     cdef public object RightEdge
+    cdef int parent_grid_id
     cdef VolumeContainer *container
 
     @cython.boundscheck(False)
@@ -95,6 +82,7 @@
                   np.ndarray[np.int64_t, ndim=1] dims):
         # The data is likely brought in via a slice, so we copy it
         cdef np.ndarray[np.float64_t, ndim=3] tdata
+        self.parent_grid_id = parent_grid_id
         self.LeftEdge = left_edge
         self.RightEdge = right_edge
         self.container = <VolumeContainer *> \
@@ -324,6 +312,10 @@
     int n_samples
     FieldInterpolationTable *fits
     int field_table_ids[6]
+    np.float64_t star_coeff
+    np.float64_t star_er
+    np.float64_t star_sigma_num
+    kdtree_utils.kdtree *star_list
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
@@ -368,10 +360,102 @@
         for i in range(vc.n_fields):
             dvs[i] += slopes[i]
 
+cdef class star_kdtree_container:
+    cdef kdtree_utils.kdtree *tree
+    cdef public np.float64_t sigma
+    cdef public np.float64_t coeff
+
+    def __init__(self):
+        self.tree = kdtree_utils.kd_create(3)
+
+    def add_points(self,
+                   np.ndarray[np.float64_t, ndim=1] pos_x,
+                   np.ndarray[np.float64_t, ndim=1] pos_y,
+                   np.ndarray[np.float64_t, ndim=1] pos_z,
+                   np.ndarray[np.float64_t, ndim=2] star_colors):
+        cdef int i, n
+        cdef np.float64_t *pointer = <np.float64_t *> star_colors.data
+        for i in range(pos_x.shape[0]):
+            kdtree_utils.kd_insert3(self.tree,
+                pos_x[i], pos_y[i], pos_z[i], pointer + i*3)
+
+    def __dealloc__(self):
+        kdtree_utils.kd_free(self.tree)
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef void volume_render_stars_sampler(
+                 VolumeContainer *vc, 
+                 np.float64_t v_pos[3],
+                 np.float64_t v_dir[3],
+                 np.float64_t enter_t,
+                 np.float64_t exit_t,
+                 int index[3],
+                 void *data) nogil:
+    cdef ImageAccumulator *im = <ImageAccumulator *> data
+    cdef VolumeRenderAccumulator *vri = <VolumeRenderAccumulator *> \
+            im.supp_data
+    cdef kdtree_utils.kdres *ballq = NULL
+    # we assume this has vertex-centered data.
+    cdef int offset = index[0] * (vc.dims[1] + 1) * (vc.dims[2] + 1) \
+                    + index[1] * (vc.dims[2] + 1) + index[2]
+    cdef np.float64_t slopes[6], dp[3], ds[3]
+    cdef np.float64_t dt = (exit_t - enter_t) / vri.n_samples
+    cdef np.float64_t dvs[6], cell_left[3], local_dds[3], pos[3]
+    cdef int nstars
+    cdef np.float64_t *colors = NULL, gexp, gaussian, px, py, pz
+    for i in range(3):
+        dp[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
+        dp[i] -= index[i] * vc.dds[i] + vc.left_edge[i]
+        dp[i] *= vc.idds[i]
+        ds[i] = v_dir[i] * vc.idds[i] * dt
+    for i in range(vc.n_fields):
+        slopes[i] = offset_interpolate(vc.dims, dp,
+                        vc.data[i] + offset)
+    cdef np.float64_t temp
+    # Now we get the ball-tree result for the stars near our cell center.
+    for i in range(3):
+        cell_left[i] = index[i] * vc.dds[i] + vc.left_edge[i]
+        pos[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
+        local_dds[i] = v_dir[i] * dt
+    ballq = kdtree_utils.kd_nearest_range3(
+        vri.star_list, cell_left[0] + vc.dds[0]*0.5,
+                        cell_left[1] + vc.dds[1]*0.5,
+                        cell_left[2] + vc.dds[2]*0.5,
+                        vri.star_er + 0.9*vc.dds[0])
+                                    # ~0.866 + a bit
+
+    nstars = kdtree_utils.kd_res_size(ballq)
+    for i in range(vc.n_fields):
+        temp = slopes[i]
+        slopes[i] -= offset_interpolate(vc.dims, dp,
+                         vc.data[i] + offset)
+        slopes[i] *= -1.0/vri.n_samples
+        dvs[i] = temp
+    for dti in range(vri.n_samples): 
+        # Now we add the contribution from stars
+        for i in range(nstars):
+            kdtree_utils.kd_res_item3(ballq, &px, &py, &pz)
+            colors = <np.float64_t *> kdtree_utils.kd_res_item_data(ballq)
+            kdtree_utils.kd_res_next(ballq)
+            gexp = (px - pos[0])*(px - pos[0]) \
+                 + (py - pos[1])*(py - pos[1]) \
+                 + (pz - pos[2])*(pz - pos[2])
+            gaussian = vri.star_coeff * expl(-gexp/vri.star_sigma_num)
+            for i in range(3): im.rgba[i] += gaussian*dt*colors[i]
+        for i in range(3):
+            pos[i] += local_dds[i]
+        FIT_eval_transfer(dt, dvs, im.rgba, vri.n_fits, vri.fits,
+                          vri.field_table_ids)
+        for i in range(vc.n_fields):
+            dvs[i] += slopes[i]
+
 cdef class VolumeRenderSampler(ImageSampler):
     cdef VolumeRenderAccumulator *vra
     cdef public object tf_obj
     cdef public object my_field_tables
+    cdef kdtree_utils.kdtree **trees
     def __cinit__(self, 
                   np.ndarray vp_pos,
                   np.ndarray vp_dir,
@@ -381,7 +465,8 @@
                   np.ndarray[np.float64_t, ndim=1] x_vec,
                   np.ndarray[np.float64_t, ndim=1] y_vec,
                   np.ndarray[np.float64_t, ndim=1] width,
-                  tf_obj, n_samples = 10):
+                  tf_obj, n_samples = 10,
+                  star_list = None):
         ImageSampler.__init__(self, vp_pos, vp_dir, center, bounds, image,
                                x_vec, y_vec, width)
         cdef int i
@@ -409,9 +494,22 @@
         for i in range(6):
             self.vra.field_table_ids[i] = tf_obj.field_table_ids[i]
         self.supp_data = <void *> self.vra
+        cdef star_kdtree_container skdc
+        if star_list is None:
+            self.trees = NULL
+        else:
+            self.trees = <kdtree_utils.kdtree **> malloc(
+                sizeof(kdtree_utils.kdtree*) * len(star_list))
+            for i in range(len(star_list)):
+                skdc = star_list[i]
+                self.trees[i] = skdc.tree
 
     def setup(self, PartitionedGrid pg):
-        self.sampler = volume_render_sampler
+        if self.trees == NULL:
+            self.sampler = volume_render_sampler
+        else:
+            self.vra.star_list = self.trees[pg.parent_grid_id]
+            self.sampler = volume_render_stars_sampler
 
     def __dealloc__(self):
         return


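The star term in volume_render_stars_sampler above deposits, at every sample point along the ray, a Gaussian-weighted color contribution from each star returned by the kd-tree range query. As a rough illustration only (the kd-tree query is replaced here by a brute-force distance computation, and star_coeff/star_sigma stand in for vri.star_coeff and vri.star_sigma_num), the per-sample accumulation amounts to:

import numpy as np

def add_star_contributions(rgba, pos, star_pos, star_colors,
                           star_coeff, star_sigma, dt):
    # Squared distance from the sample point to every nearby star; the
    # Cython sampler gets these candidates from kd_nearest_range3 instead.
    d2 = np.sum((star_pos - pos) ** 2, axis=1)
    # Gaussian weight per star, mirroring
    #   gaussian = vri.star_coeff * expl(-gexp / vri.star_sigma_num)
    w = star_coeff * np.exp(-d2 / star_sigma)
    # Each star deposits weight * dt * color into the RGB channels.
    rgba[:3] += dt * (w[:, None] * star_colors).sum(axis=0)
    return rgba

# Illustrative usage with made-up values:
rgba = np.zeros(6)
sample_point = np.array([0.5, 0.5, 0.5])
stars = np.random.random((8, 3))
colors = np.random.random((8, 3))
add_star_contributions(rgba, sample_point, stars, colors,
                       star_coeff=1.0, star_sigma=0.01, dt=0.1)
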
diff -r d91a8b84ec5ab4eeedda6502baf307da4b7eca12 -r de26d4dcfc5c656de04f6b2279ba29ca0d129a6a yt/utilities/_amr_utils/kdtree_utils.pxd
--- a/yt/utilities/_amr_utils/kdtree_utils.pxd
+++ b/yt/utilities/_amr_utils/kdtree_utils.pxd
@@ -35,18 +35,19 @@
     void kd_free(kdtree *tree)
     
     int kd_insert3(kdtree *tree, np.float64_t x, np.float64_t y, np.float64_t z, void *data)
-    kdres *kd_nearest3(kdtree *tree, np.float64_t x, np.float64_t y, np.float64_t z)
+    kdres *kd_nearest3(kdtree *tree, np.float64_t x, np.float64_t y,
+                       np.float64_t z) nogil
 
     kdres *kd_nearest_range3(kdtree *tree, np.float64_t x, np.float64_t y, np.float64_t z,
-                             np.float64_t range)
+                             np.float64_t range) nogil
 
-    void kd_res_free(kdres *set)
-    int kd_res_size(kdres *set)
-    int kd_res_next(kdres *set)
-    void kd_res_rewind(kdres *set)
+    void kd_res_free(kdres *set) nogil
+    int kd_res_size(kdres *set) nogil
+    int kd_res_next(kdres *set) nogil
+    void kd_res_rewind(kdres *set) nogil
 
-    void kd_res_item3(kdres *set, np.float64_t *x, np.float64_t *y, np.float64_t *z)
-    void *kd_res_item_data(kdres *set)
+    void kd_res_item3(kdres *set, np.float64_t *x, np.float64_t *y,
+                      np.float64_t *z) nogil
+    void *kd_res_item_data(kdres *set) nogil
 
     void kd_data_destructor(kdtree *tree, void (*destr)(void*))
-    void *kd_res_item_data(kdres *set)


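The nogil annotations above let the kd-tree queries run inside threaded sampling loops; functionally, kd_nearest_range3 plus the kd_res_* iteration amounts to "return every stored point within a given radius of (x, y, z)". A brute-force pure-Python stand-in (an assumption about what the result set contains, not the library's actual implementation) would be:

def nearest_range3(points, x, y, z, rng):
    # points is a list of ((px, py, pz), data) pairs; the real library walks
    # a k-d tree rather than scanning every entry.
    hits = []
    for pos, data in points:
        d2 = sum((p - c) ** 2 for p, c in zip(pos, (x, y, z)))
        if d2 <= rng * rng:
            hits.append((pos, data))
    return hits

stars = [((0.1, 0.2, 0.3), "star-0"), ((0.9, 0.9, 0.9), "star-1")]
print(nearest_range3(stars, 0.0, 0.0, 0.0, 0.5))  # only star-0 is in range
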
diff -r d91a8b84ec5ab4eeedda6502baf307da4b7eca12 -r de26d4dcfc5c656de04f6b2279ba29ca0d129a6a yt/utilities/_amr_utils/marching_cubes.pyx
--- /dev/null
+++ b/yt/utilities/_amr_utils/marching_cubes.pyx
@@ -0,0 +1,659 @@
+"""
+Marching cubes implementation
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+cimport numpy as np
+cimport cython
+import numpy as np
+from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip
+from stdlib cimport malloc, free, abs
+from fixed_interpolator cimport *
+
+cdef struct Triangle:
+    Triangle *next
+    np.float64_t p[3][3]
+    np.float64_t val
+
+cdef struct TriangleCollection:
+    int count
+    Triangle *first
+    Triangle *current
+
+cdef Triangle *AddTriangle(Triangle *self,
+                    np.float64_t p0[3], np.float64_t p1[3], np.float64_t p2[3]):
+    cdef Triangle *nn = <Triangle *> malloc(sizeof(Triangle))
+    if self != NULL:
+        self.next = nn
+    cdef int i
+    for i in range(3):
+        nn.p[0][i] = p0[i]
+    for i in range(3):
+        nn.p[1][i] = p1[i]
+    for i in range(3):
+        nn.p[2][i] = p2[i]
+    nn.next = NULL
+    return nn
+
+cdef int CountTriangles(Triangle *first):
+    cdef int count = 0
+    cdef Triangle *this = first
+    while this != NULL:
+        count += 1
+        this = this.next
+    return count
+
+cdef void FillTriangleValues(np.ndarray[np.float64_t, ndim=1] values,
+                             Triangle *first):
+    cdef Triangle *this = first
+    cdef Triangle *last
+    cdef int i = 0
+    while this != NULL:
+        values[i] = this.val
+        i += 1
+        last = this
+        this = this.next
+
+cdef void WipeTriangles(Triangle *first):
+    cdef Triangle *this = first
+    cdef Triangle *last
+    while this != NULL:
+        last = this
+        this = this.next
+        free(last)
+
+cdef void FillAndWipeTriangles(np.ndarray[np.float64_t, ndim=2] vertices,
+                               Triangle *first):
+    cdef int count = 0
+    cdef Triangle *this = first
+    cdef Triangle *last
+    cdef int i, j
+    while this != NULL:
+        for i in range(3):
+            for j in range(3):
+                vertices[count, j] = this.p[i][j]
+            count += 1 # Do it at the end because it's an index
+        last = this
+        this = this.next
+        free(last)
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef int march_cubes(
+                 np.float64_t gv[8], np.float64_t isovalue,
+                 np.float64_t dds[3],
+                 np.float64_t x, np.float64_t y, np.float64_t z,
+                 TriangleCollection *triangles):
+    cdef int *edge_table=[
+    0x0  , 0x109, 0x203, 0x30a, 0x406, 0x50f, 0x605, 0x70c,
+    0x80c, 0x905, 0xa0f, 0xb06, 0xc0a, 0xd03, 0xe09, 0xf00,
+    0x190, 0x99 , 0x393, 0x29a, 0x596, 0x49f, 0x795, 0x69c,
+    0x99c, 0x895, 0xb9f, 0xa96, 0xd9a, 0xc93, 0xf99, 0xe90,
+    0x230, 0x339, 0x33 , 0x13a, 0x636, 0x73f, 0x435, 0x53c,
+    0xa3c, 0xb35, 0x83f, 0x936, 0xe3a, 0xf33, 0xc39, 0xd30,
+    0x3a0, 0x2a9, 0x1a3, 0xaa , 0x7a6, 0x6af, 0x5a5, 0x4ac,
+    0xbac, 0xaa5, 0x9af, 0x8a6, 0xfaa, 0xea3, 0xda9, 0xca0,
+    0x460, 0x569, 0x663, 0x76a, 0x66 , 0x16f, 0x265, 0x36c,
+    0xc6c, 0xd65, 0xe6f, 0xf66, 0x86a, 0x963, 0xa69, 0xb60,
+    0x5f0, 0x4f9, 0x7f3, 0x6fa, 0x1f6, 0xff , 0x3f5, 0x2fc,
+    0xdfc, 0xcf5, 0xfff, 0xef6, 0x9fa, 0x8f3, 0xbf9, 0xaf0,
+    0x650, 0x759, 0x453, 0x55a, 0x256, 0x35f, 0x55 , 0x15c,
+    0xe5c, 0xf55, 0xc5f, 0xd56, 0xa5a, 0xb53, 0x859, 0x950,
+    0x7c0, 0x6c9, 0x5c3, 0x4ca, 0x3c6, 0x2cf, 0x1c5, 0xcc ,
+    0xfcc, 0xec5, 0xdcf, 0xcc6, 0xbca, 0xac3, 0x9c9, 0x8c0,
+    0x8c0, 0x9c9, 0xac3, 0xbca, 0xcc6, 0xdcf, 0xec5, 0xfcc,
+    0xcc , 0x1c5, 0x2cf, 0x3c6, 0x4ca, 0x5c3, 0x6c9, 0x7c0,
+    0x950, 0x859, 0xb53, 0xa5a, 0xd56, 0xc5f, 0xf55, 0xe5c,
+    0x15c, 0x55 , 0x35f, 0x256, 0x55a, 0x453, 0x759, 0x650,
+    0xaf0, 0xbf9, 0x8f3, 0x9fa, 0xef6, 0xfff, 0xcf5, 0xdfc,
+    0x2fc, 0x3f5, 0xff , 0x1f6, 0x6fa, 0x7f3, 0x4f9, 0x5f0,
+    0xb60, 0xa69, 0x963, 0x86a, 0xf66, 0xe6f, 0xd65, 0xc6c,
+    0x36c, 0x265, 0x16f, 0x66 , 0x76a, 0x663, 0x569, 0x460,
+    0xca0, 0xda9, 0xea3, 0xfaa, 0x8a6, 0x9af, 0xaa5, 0xbac,
+    0x4ac, 0x5a5, 0x6af, 0x7a6, 0xaa , 0x1a3, 0x2a9, 0x3a0,
+    0xd30, 0xc39, 0xf33, 0xe3a, 0x936, 0x83f, 0xb35, 0xa3c,
+    0x53c, 0x435, 0x73f, 0x636, 0x13a, 0x33 , 0x339, 0x230,
+    0xe90, 0xf99, 0xc93, 0xd9a, 0xa96, 0xb9f, 0x895, 0x99c,
+    0x69c, 0x795, 0x49f, 0x596, 0x29a, 0x393, 0x99 , 0x190,
+    0xf00, 0xe09, 0xd03, 0xc0a, 0xb06, 0xa0f, 0x905, 0x80c,
+    0x70c, 0x605, 0x50f, 0x406, 0x30a, 0x203, 0x109, 0x0   ]
+
+    cdef int **tri_table = \
+    [[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [0, 1, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [1, 8, 3, 9, 8, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [0, 8, 3, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [9, 2, 10, 0, 2, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [2, 8, 3, 2, 10, 8, 10, 9, 8, -1, -1, -1, -1, -1, -1, -1],
+    [3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [0, 11, 2, 8, 11, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [1, 9, 0, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [1, 11, 2, 1, 9, 11, 9, 8, 11, -1, -1, -1, -1, -1, -1, -1],
+    [3, 10, 1, 11, 10, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [0, 10, 1, 0, 8, 10, 8, 11, 10, -1, -1, -1, -1, -1, -1, -1],
+    [3, 9, 0, 3, 11, 9, 11, 10, 9, -1, -1, -1, -1, -1, -1, -1],
+    [9, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [4, 3, 0, 7, 3, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [0, 1, 9, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [4, 1, 9, 4, 7, 1, 7, 3, 1, -1, -1, -1, -1, -1, -1, -1],
+    [1, 2, 10, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [3, 4, 7, 3, 0, 4, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1],
+    [9, 2, 10, 9, 0, 2, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1],
+    [2, 10, 9, 2, 9, 7, 2, 7, 3, 7, 9, 4, -1, -1, -1, -1],
+    [8, 4, 7, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [11, 4, 7, 11, 2, 4, 2, 0, 4, -1, -1, -1, -1, -1, -1, -1],
+    [9, 0, 1, 8, 4, 7, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1],
+    [4, 7, 11, 9, 4, 11, 9, 11, 2, 9, 2, 1, -1, -1, -1, -1],
+    [3, 10, 1, 3, 11, 10, 7, 8, 4, -1, -1, -1, -1, -1, -1, -1],
+    [1, 11, 10, 1, 4, 11, 1, 0, 4, 7, 11, 4, -1, -1, -1, -1],
+    [4, 7, 8, 9, 0, 11, 9, 11, 10, 11, 0, 3, -1, -1, -1, -1],
+    [4, 7, 11, 4, 11, 9, 9, 11, 10, -1, -1, -1, -1, -1, -1, -1],
+    [9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [9, 5, 4, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [0, 5, 4, 1, 5, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [8, 5, 4, 8, 3, 5, 3, 1, 5, -1, -1, -1, -1, -1, -1, -1],
+    [1, 2, 10, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [3, 0, 8, 1, 2, 10, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1],
+    [5, 2, 10, 5, 4, 2, 4, 0, 2, -1, -1, -1, -1, -1, -1, -1],
+    [2, 10, 5, 3, 2, 5, 3, 5, 4, 3, 4, 8, -1, -1, -1, -1],
+    [9, 5, 4, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [0, 11, 2, 0, 8, 11, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1],
+    [0, 5, 4, 0, 1, 5, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1],
+    [2, 1, 5, 2, 5, 8, 2, 8, 11, 4, 8, 5, -1, -1, -1, -1],
+    [10, 3, 11, 10, 1, 3, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1],
+    [4, 9, 5, 0, 8, 1, 8, 10, 1, 8, 11, 10, -1, -1, -1, -1],
+    [5, 4, 0, 5, 0, 11, 5, 11, 10, 11, 0, 3, -1, -1, -1, -1],
+    [5, 4, 8, 5, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1],
+    [9, 7, 8, 5, 7, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [9, 3, 0, 9, 5, 3, 5, 7, 3, -1, -1, -1, -1, -1, -1, -1],
+    [0, 7, 8, 0, 1, 7, 1, 5, 7, -1, -1, -1, -1, -1, -1, -1],
+    [1, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [9, 7, 8, 9, 5, 7, 10, 1, 2, -1, -1, -1, -1, -1, -1, -1],
+    [10, 1, 2, 9, 5, 0, 5, 3, 0, 5, 7, 3, -1, -1, -1, -1],
+    [8, 0, 2, 8, 2, 5, 8, 5, 7, 10, 5, 2, -1, -1, -1, -1],
+    [2, 10, 5, 2, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1],
+    [7, 9, 5, 7, 8, 9, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1],
+    [9, 5, 7, 9, 7, 2, 9, 2, 0, 2, 7, 11, -1, -1, -1, -1],
+    [2, 3, 11, 0, 1, 8, 1, 7, 8, 1, 5, 7, -1, -1, -1, -1],
+    [11, 2, 1, 11, 1, 7, 7, 1, 5, -1, -1, -1, -1, -1, -1, -1],
+    [9, 5, 8, 8, 5, 7, 10, 1, 3, 10, 3, 11, -1, -1, -1, -1],
+    [5, 7, 0, 5, 0, 9, 7, 11, 0, 1, 0, 10, 11, 10, 0, -1],
+    [11, 10, 0, 11, 0, 3, 10, 5, 0, 8, 0, 7, 5, 7, 0, -1],
+    [11, 10, 5, 7, 11, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [0, 8, 3, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [9, 0, 1, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [1, 8, 3, 1, 9, 8, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1],
+    [1, 6, 5, 2, 6, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [1, 6, 5, 1, 2, 6, 3, 0, 8, -1, -1, -1, -1, -1, -1, -1],
+    [9, 6, 5, 9, 0, 6, 0, 2, 6, -1, -1, -1, -1, -1, -1, -1],
+    [5, 9, 8, 5, 8, 2, 5, 2, 6, 3, 2, 8, -1, -1, -1, -1],
+    [2, 3, 11, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [11, 0, 8, 11, 2, 0, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1],
+    [0, 1, 9, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1],
+    [5, 10, 6, 1, 9, 2, 9, 11, 2, 9, 8, 11, -1, -1, -1, -1],
+    [6, 3, 11, 6, 5, 3, 5, 1, 3, -1, -1, -1, -1, -1, -1, -1],
+    [0, 8, 11, 0, 11, 5, 0, 5, 1, 5, 11, 6, -1, -1, -1, -1],
+    [3, 11, 6, 0, 3, 6, 0, 6, 5, 0, 5, 9, -1, -1, -1, -1],
+    [6, 5, 9, 6, 9, 11, 11, 9, 8, -1, -1, -1, -1, -1, -1, -1],
+    [5, 10, 6, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [4, 3, 0, 4, 7, 3, 6, 5, 10, -1, -1, -1, -1, -1, -1, -1],
+    [1, 9, 0, 5, 10, 6, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1],
+    [10, 6, 5, 1, 9, 7, 1, 7, 3, 7, 9, 4, -1, -1, -1, -1],
+    [6, 1, 2, 6, 5, 1, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1],
+    [1, 2, 5, 5, 2, 6, 3, 0, 4, 3, 4, 7, -1, -1, -1, -1],
+    [8, 4, 7, 9, 0, 5, 0, 6, 5, 0, 2, 6, -1, -1, -1, -1],
+    [7, 3, 9, 7, 9, 4, 3, 2, 9, 5, 9, 6, 2, 6, 9, -1],
+    [3, 11, 2, 7, 8, 4, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1],
+    [5, 10, 6, 4, 7, 2, 4, 2, 0, 2, 7, 11, -1, -1, -1, -1],
+    [0, 1, 9, 4, 7, 8, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1],
+    [9, 2, 1, 9, 11, 2, 9, 4, 11, 7, 11, 4, 5, 10, 6, -1],
+    [8, 4, 7, 3, 11, 5, 3, 5, 1, 5, 11, 6, -1, -1, -1, -1],
+    [5, 1, 11, 5, 11, 6, 1, 0, 11, 7, 11, 4, 0, 4, 11, -1],
+    [0, 5, 9, 0, 6, 5, 0, 3, 6, 11, 6, 3, 8, 4, 7, -1],
+    [6, 5, 9, 6, 9, 11, 4, 7, 9, 7, 11, 9, -1, -1, -1, -1],
+    [10, 4, 9, 6, 4, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [4, 10, 6, 4, 9, 10, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1],
+    [10, 0, 1, 10, 6, 0, 6, 4, 0, -1, -1, -1, -1, -1, -1, -1],
+    [8, 3, 1, 8, 1, 6, 8, 6, 4, 6, 1, 10, -1, -1, -1, -1],
+    [1, 4, 9, 1, 2, 4, 2, 6, 4, -1, -1, -1, -1, -1, -1, -1],
+    [3, 0, 8, 1, 2, 9, 2, 4, 9, 2, 6, 4, -1, -1, -1, -1],
+    [0, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [8, 3, 2, 8, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1],
+    [10, 4, 9, 10, 6, 4, 11, 2, 3, -1, -1, -1, -1, -1, -1, -1],
+    [0, 8, 2, 2, 8, 11, 4, 9, 10, 4, 10, 6, -1, -1, -1, -1],
+    [3, 11, 2, 0, 1, 6, 0, 6, 4, 6, 1, 10, -1, -1, -1, -1],
+    [6, 4, 1, 6, 1, 10, 4, 8, 1, 2, 1, 11, 8, 11, 1, -1],
+    [9, 6, 4, 9, 3, 6, 9, 1, 3, 11, 6, 3, -1, -1, -1, -1],
+    [8, 11, 1, 8, 1, 0, 11, 6, 1, 9, 1, 4, 6, 4, 1, -1],
+    [3, 11, 6, 3, 6, 0, 0, 6, 4, -1, -1, -1, -1, -1, -1, -1],
+    [6, 4, 8, 11, 6, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [7, 10, 6, 7, 8, 10, 8, 9, 10, -1, -1, -1, -1, -1, -1, -1],
+    [0, 7, 3, 0, 10, 7, 0, 9, 10, 6, 7, 10, -1, -1, -1, -1],
+    [10, 6, 7, 1, 10, 7, 1, 7, 8, 1, 8, 0, -1, -1, -1, -1],
+    [10, 6, 7, 10, 7, 1, 1, 7, 3, -1, -1, -1, -1, -1, -1, -1],
+    [1, 2, 6, 1, 6, 8, 1, 8, 9, 8, 6, 7, -1, -1, -1, -1],
+    [2, 6, 9, 2, 9, 1, 6, 7, 9, 0, 9, 3, 7, 3, 9, -1],
+    [7, 8, 0, 7, 0, 6, 6, 0, 2, -1, -1, -1, -1, -1, -1, -1],
+    [7, 3, 2, 6, 7, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [2, 3, 11, 10, 6, 8, 10, 8, 9, 8, 6, 7, -1, -1, -1, -1],
+    [2, 0, 7, 2, 7, 11, 0, 9, 7, 6, 7, 10, 9, 10, 7, -1],
+    [1, 8, 0, 1, 7, 8, 1, 10, 7, 6, 7, 10, 2, 3, 11, -1],
+    [11, 2, 1, 11, 1, 7, 10, 6, 1, 6, 7, 1, -1, -1, -1, -1],
+    [8, 9, 6, 8, 6, 7, 9, 1, 6, 11, 6, 3, 1, 3, 6, -1],
+    [0, 9, 1, 11, 6, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [7, 8, 0, 7, 0, 6, 3, 11, 0, 11, 6, 0, -1, -1, -1, -1],
+    [7, 11, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [3, 0, 8, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [0, 1, 9, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [8, 1, 9, 8, 3, 1, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1],
+    [10, 1, 2, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [1, 2, 10, 3, 0, 8, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1],
+    [2, 9, 0, 2, 10, 9, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1],
+    [6, 11, 7, 2, 10, 3, 10, 8, 3, 10, 9, 8, -1, -1, -1, -1],
+    [7, 2, 3, 6, 2, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [7, 0, 8, 7, 6, 0, 6, 2, 0, -1, -1, -1, -1, -1, -1, -1],
+    [2, 7, 6, 2, 3, 7, 0, 1, 9, -1, -1, -1, -1, -1, -1, -1],
+    [1, 6, 2, 1, 8, 6, 1, 9, 8, 8, 7, 6, -1, -1, -1, -1],
+    [10, 7, 6, 10, 1, 7, 1, 3, 7, -1, -1, -1, -1, -1, -1, -1],
+    [10, 7, 6, 1, 7, 10, 1, 8, 7, 1, 0, 8, -1, -1, -1, -1],
+    [0, 3, 7, 0, 7, 10, 0, 10, 9, 6, 10, 7, -1, -1, -1, -1],
+    [7, 6, 10, 7, 10, 8, 8, 10, 9, -1, -1, -1, -1, -1, -1, -1],
+    [6, 8, 4, 11, 8, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [3, 6, 11, 3, 0, 6, 0, 4, 6, -1, -1, -1, -1, -1, -1, -1],
+    [8, 6, 11, 8, 4, 6, 9, 0, 1, -1, -1, -1, -1, -1, -1, -1],
+    [9, 4, 6, 9, 6, 3, 9, 3, 1, 11, 3, 6, -1, -1, -1, -1],
+    [6, 8, 4, 6, 11, 8, 2, 10, 1, -1, -1, -1, -1, -1, -1, -1],
+    [1, 2, 10, 3, 0, 11, 0, 6, 11, 0, 4, 6, -1, -1, -1, -1],
+    [4, 11, 8, 4, 6, 11, 0, 2, 9, 2, 10, 9, -1, -1, -1, -1],
+    [10, 9, 3, 10, 3, 2, 9, 4, 3, 11, 3, 6, 4, 6, 3, -1],
+    [8, 2, 3, 8, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1],
+    [0, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [1, 9, 0, 2, 3, 4, 2, 4, 6, 4, 3, 8, -1, -1, -1, -1],
+    [1, 9, 4, 1, 4, 2, 2, 4, 6, -1, -1, -1, -1, -1, -1, -1],
+    [8, 1, 3, 8, 6, 1, 8, 4, 6, 6, 10, 1, -1, -1, -1, -1],
+    [10, 1, 0, 10, 0, 6, 6, 0, 4, -1, -1, -1, -1, -1, -1, -1],
+    [4, 6, 3, 4, 3, 8, 6, 10, 3, 0, 3, 9, 10, 9, 3, -1],
+    [10, 9, 4, 6, 10, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [4, 9, 5, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [0, 8, 3, 4, 9, 5, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1],
+    [5, 0, 1, 5, 4, 0, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1],
+    [11, 7, 6, 8, 3, 4, 3, 5, 4, 3, 1, 5, -1, -1, -1, -1],
+    [9, 5, 4, 10, 1, 2, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1],
+    [6, 11, 7, 1, 2, 10, 0, 8, 3, 4, 9, 5, -1, -1, -1, -1],
+    [7, 6, 11, 5, 4, 10, 4, 2, 10, 4, 0, 2, -1, -1, -1, -1],
+    [3, 4, 8, 3, 5, 4, 3, 2, 5, 10, 5, 2, 11, 7, 6, -1],
+    [7, 2, 3, 7, 6, 2, 5, 4, 9, -1, -1, -1, -1, -1, -1, -1],
+    [9, 5, 4, 0, 8, 6, 0, 6, 2, 6, 8, 7, -1, -1, -1, -1],
+    [3, 6, 2, 3, 7, 6, 1, 5, 0, 5, 4, 0, -1, -1, -1, -1],
+    [6, 2, 8, 6, 8, 7, 2, 1, 8, 4, 8, 5, 1, 5, 8, -1],
+    [9, 5, 4, 10, 1, 6, 1, 7, 6, 1, 3, 7, -1, -1, -1, -1],
+    [1, 6, 10, 1, 7, 6, 1, 0, 7, 8, 7, 0, 9, 5, 4, -1],
+    [4, 0, 10, 4, 10, 5, 0, 3, 10, 6, 10, 7, 3, 7, 10, -1],
+    [7, 6, 10, 7, 10, 8, 5, 4, 10, 4, 8, 10, -1, -1, -1, -1],
+    [6, 9, 5, 6, 11, 9, 11, 8, 9, -1, -1, -1, -1, -1, -1, -1],
+    [3, 6, 11, 0, 6, 3, 0, 5, 6, 0, 9, 5, -1, -1, -1, -1],
+    [0, 11, 8, 0, 5, 11, 0, 1, 5, 5, 6, 11, -1, -1, -1, -1],
+    [6, 11, 3, 6, 3, 5, 5, 3, 1, -1, -1, -1, -1, -1, -1, -1],
+    [1, 2, 10, 9, 5, 11, 9, 11, 8, 11, 5, 6, -1, -1, -1, -1],
+    [0, 11, 3, 0, 6, 11, 0, 9, 6, 5, 6, 9, 1, 2, 10, -1],
+    [11, 8, 5, 11, 5, 6, 8, 0, 5, 10, 5, 2, 0, 2, 5, -1],
+    [6, 11, 3, 6, 3, 5, 2, 10, 3, 10, 5, 3, -1, -1, -1, -1],
+    [5, 8, 9, 5, 2, 8, 5, 6, 2, 3, 8, 2, -1, -1, -1, -1],
+    [9, 5, 6, 9, 6, 0, 0, 6, 2, -1, -1, -1, -1, -1, -1, -1],
+    [1, 5, 8, 1, 8, 0, 5, 6, 8, 3, 8, 2, 6, 2, 8, -1],
+    [1, 5, 6, 2, 1, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [1, 3, 6, 1, 6, 10, 3, 8, 6, 5, 6, 9, 8, 9, 6, -1],
+    [10, 1, 0, 10, 0, 6, 9, 5, 0, 5, 6, 0, -1, -1, -1, -1],
+    [0, 3, 8, 5, 6, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [10, 5, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [11, 5, 10, 7, 5, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [11, 5, 10, 11, 7, 5, 8, 3, 0, -1, -1, -1, -1, -1, -1, -1],
+    [5, 11, 7, 5, 10, 11, 1, 9, 0, -1, -1, -1, -1, -1, -1, -1],
+    [10, 7, 5, 10, 11, 7, 9, 8, 1, 8, 3, 1, -1, -1, -1, -1],
+    [11, 1, 2, 11, 7, 1, 7, 5, 1, -1, -1, -1, -1, -1, -1, -1],
+    [0, 8, 3, 1, 2, 7, 1, 7, 5, 7, 2, 11, -1, -1, -1, -1],
+    [9, 7, 5, 9, 2, 7, 9, 0, 2, 2, 11, 7, -1, -1, -1, -1],
+    [7, 5, 2, 7, 2, 11, 5, 9, 2, 3, 2, 8, 9, 8, 2, -1],
+    [2, 5, 10, 2, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1],
+    [8, 2, 0, 8, 5, 2, 8, 7, 5, 10, 2, 5, -1, -1, -1, -1],
+    [9, 0, 1, 5, 10, 3, 5, 3, 7, 3, 10, 2, -1, -1, -1, -1],
+    [9, 8, 2, 9, 2, 1, 8, 7, 2, 10, 2, 5, 7, 5, 2, -1],
+    [1, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [0, 8, 7, 0, 7, 1, 1, 7, 5, -1, -1, -1, -1, -1, -1, -1],
+    [9, 0, 3, 9, 3, 5, 5, 3, 7, -1, -1, -1, -1, -1, -1, -1],
+    [9, 8, 7, 5, 9, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [5, 8, 4, 5, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1],
+    [5, 0, 4, 5, 11, 0, 5, 10, 11, 11, 3, 0, -1, -1, -1, -1],
+    [0, 1, 9, 8, 4, 10, 8, 10, 11, 10, 4, 5, -1, -1, -1, -1],
+    [10, 11, 4, 10, 4, 5, 11, 3, 4, 9, 4, 1, 3, 1, 4, -1],
+    [2, 5, 1, 2, 8, 5, 2, 11, 8, 4, 5, 8, -1, -1, -1, -1],
+    [0, 4, 11, 0, 11, 3, 4, 5, 11, 2, 11, 1, 5, 1, 11, -1],
+    [0, 2, 5, 0, 5, 9, 2, 11, 5, 4, 5, 8, 11, 8, 5, -1],
+    [9, 4, 5, 2, 11, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [2, 5, 10, 3, 5, 2, 3, 4, 5, 3, 8, 4, -1, -1, -1, -1],
+    [5, 10, 2, 5, 2, 4, 4, 2, 0, -1, -1, -1, -1, -1, -1, -1],
+    [3, 10, 2, 3, 5, 10, 3, 8, 5, 4, 5, 8, 0, 1, 9, -1],
+    [5, 10, 2, 5, 2, 4, 1, 9, 2, 9, 4, 2, -1, -1, -1, -1],
+    [8, 4, 5, 8, 5, 3, 3, 5, 1, -1, -1, -1, -1, -1, -1, -1],
+    [0, 4, 5, 1, 0, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [8, 4, 5, 8, 5, 3, 9, 0, 5, 0, 3, 5, -1, -1, -1, -1],
+    [9, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [4, 11, 7, 4, 9, 11, 9, 10, 11, -1, -1, -1, -1, -1, -1, -1],
+    [0, 8, 3, 4, 9, 7, 9, 11, 7, 9, 10, 11, -1, -1, -1, -1],
+    [1, 10, 11, 1, 11, 4, 1, 4, 0, 7, 4, 11, -1, -1, -1, -1],
+    [3, 1, 4, 3, 4, 8, 1, 10, 4, 7, 4, 11, 10, 11, 4, -1],
+    [4, 11, 7, 9, 11, 4, 9, 2, 11, 9, 1, 2, -1, -1, -1, -1],
+    [9, 7, 4, 9, 11, 7, 9, 1, 11, 2, 11, 1, 0, 8, 3, -1],
+    [11, 7, 4, 11, 4, 2, 2, 4, 0, -1, -1, -1, -1, -1, -1, -1],
+    [11, 7, 4, 11, 4, 2, 8, 3, 4, 3, 2, 4, -1, -1, -1, -1],
+    [2, 9, 10, 2, 7, 9, 2, 3, 7, 7, 4, 9, -1, -1, -1, -1],
+    [9, 10, 7, 9, 7, 4, 10, 2, 7, 8, 7, 0, 2, 0, 7, -1],
+    [3, 7, 10, 3, 10, 2, 7, 4, 10, 1, 10, 0, 4, 0, 10, -1],
+    [1, 10, 2, 8, 7, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [4, 9, 1, 4, 1, 7, 7, 1, 3, -1, -1, -1, -1, -1, -1, -1],
+    [4, 9, 1, 4, 1, 7, 0, 8, 1, 8, 7, 1, -1, -1, -1, -1],
+    [4, 0, 3, 7, 4, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [4, 8, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [9, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [3, 0, 9, 3, 9, 11, 11, 9, 10, -1, -1, -1, -1, -1, -1, -1],
+    [0, 1, 10, 0, 10, 8, 8, 10, 11, -1, -1, -1, -1, -1, -1, -1],
+    [3, 1, 10, 11, 3, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [1, 2, 11, 1, 11, 9, 9, 11, 8, -1, -1, -1, -1, -1, -1, -1],
+    [3, 0, 9, 3, 9, 11, 1, 2, 9, 2, 11, 9, -1, -1, -1, -1],
+    [0, 2, 11, 8, 0, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [3, 2, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [2, 3, 8, 2, 8, 10, 10, 8, 9, -1, -1, -1, -1, -1, -1, -1],
+    [9, 10, 2, 0, 9, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [2, 3, 8, 2, 8, 10, 0, 1, 8, 1, 10, 8, -1, -1, -1, -1],
+    [1, 10, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [1, 3, 8, 9, 1, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [0, 9, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [0, 3, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+    [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1]]
+    cdef np.float64_t vertlist[12][3]
+    cdef int cubeindex = 0
+    cdef int n
+    cdef int nt = 0
+    for n in range(8):
+        if gv[n] < isovalue:
+            cubeindex |= (1 << n)
+    if edge_table[cubeindex] == 0:
+        return 0
+    if (edge_table[cubeindex] & 1): # 0,0,0 with 1,0,0
+        vertex_interp(gv[0], gv[1], isovalue, vertlist[0],
+                      dds, x, y, z, 0, 1)
+    if (edge_table[cubeindex] & 2): # 1,0,0 with 1,1,0
+        vertex_interp(gv[1], gv[2], isovalue, vertlist[1],
+                      dds, x, y, z, 1, 2)
+    if (edge_table[cubeindex] & 4): # 1,1,0 with 0,1,0
+        vertex_interp(gv[2], gv[3], isovalue, vertlist[2],
+                      dds, x, y, z, 2, 3)
+    if (edge_table[cubeindex] & 8): # 0,1,0 with 0,0,0
+        vertex_interp(gv[3], gv[0], isovalue, vertlist[3],
+                      dds, x, y, z, 3, 0)
+    if (edge_table[cubeindex] & 16): # 0,0,1 with 1,0,1
+        vertex_interp(gv[4], gv[5], isovalue, vertlist[4],
+                      dds, x, y, z, 4, 5)
+    if (edge_table[cubeindex] & 32): # 1,0,1 with 1,1,1
+        vertex_interp(gv[5], gv[6], isovalue, vertlist[5],
+                      dds, x, y, z, 5, 6)
+    if (edge_table[cubeindex] & 64): # 1,1,1 with 0,1,1
+        vertex_interp(gv[6], gv[7], isovalue, vertlist[6],
+                      dds, x, y, z, 6, 7)
+    if (edge_table[cubeindex] & 128): # 0,1,1 with 0,0,1
+        vertex_interp(gv[7], gv[4], isovalue, vertlist[7],
+                      dds, x, y, z, 7, 4)
+    if (edge_table[cubeindex] & 256): # 0,0,0 with 0,0,1
+        vertex_interp(gv[0], gv[4], isovalue, vertlist[8],
+                      dds, x, y, z, 0, 4)
+    if (edge_table[cubeindex] & 512): # 1,0,0 with 1,0,1
+        vertex_interp(gv[1], gv[5], isovalue, vertlist[9],
+                      dds, x, y, z, 1, 5)
+    if (edge_table[cubeindex] & 1024): # 1,1,0 with 1,1,1
+        vertex_interp(gv[2], gv[6], isovalue, vertlist[10],
+                      dds, x, y, z, 2, 6)
+    if (edge_table[cubeindex] & 2048): # 0,1,0 with 0,1,1
+        vertex_interp(gv[3], gv[7], isovalue, vertlist[11],
+                      dds, x, y, z, 3, 7)
+    n = 0
+    while 1:
+        triangles.current = AddTriangle(triangles.current,
+                    vertlist[tri_table[cubeindex][n  ]],
+                    vertlist[tri_table[cubeindex][n+1]],
+                    vertlist[tri_table[cubeindex][n+2]])
+        triangles.count += 1
+        nt += 1
+        if triangles.first == NULL:
+            triangles.first = triangles.current
+        n += 3
+        if tri_table[cubeindex][n] == -1: break
+    return nt
+    
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def march_cubes_grid(np.float64_t isovalue,
+                     np.ndarray[np.float64_t, ndim=3] values,
+                     np.ndarray[np.int32_t, ndim=3] mask,
+                     np.ndarray[np.float64_t, ndim=1] left_edge,
+                     np.ndarray[np.float64_t, ndim=1] dxs,
+                     obj_sample = None):
+    cdef int dims[3]
+    cdef int i, j, k, n, m, nt
+    cdef int offset
+    cdef np.float64_t gv[8], pos[3], point[3], idds[3]
+    cdef np.float64_t *intdata = NULL
+    cdef np.float64_t *sdata = NULL
+    cdef np.float64_t x, y, z, do_sample
+    cdef np.ndarray[np.float64_t, ndim=3] sample
+    cdef np.ndarray[np.float64_t, ndim=1] sampled
+    cdef TriangleCollection triangles
+    cdef Triangle *last, *current
+    if obj_sample is not None:
+        sample = obj_sample
+        sdata = <np.float64_t *> sample.data
+        do_sample = 1
+    else:
+        do_sample = 0
+    for i in range(3):
+        dims[i] = values.shape[i] - 1
+        idds[i] = 1.0 / dxs[i]
+    triangles.first = triangles.current = NULL
+    last = current = NULL
+    triangles.count = 0
+    cdef np.float64_t *data = <np.float64_t *> values.data
+    cdef np.float64_t *dds = <np.float64_t *> dxs.data
+    pos[0] = left_edge[0]
+    for i in range(dims[0]):
+        pos[1] = left_edge[1]
+        for j in range(dims[1]):
+            pos[2] = left_edge[2]
+            for k in range(dims[2]):
+                if mask[i,j,k] == 1:
+                    offset = i * (dims[1] + 1) * (dims[2] + 1) \
+                           + j * (dims[2] + 1) + k
+                    intdata = data + offset
+                    offset_fill(dims, intdata, gv)
+                    nt = march_cubes(gv, isovalue, dds, pos[0], pos[1], pos[2],
+                                &triangles)
+                    if do_sample == 1 and nt > 0:
+                        # At each triangle's center, sample our secondary field
+                        if last == NULL and triangles.first != NULL:
+                            current = triangles.first
+                            last = NULL
+                        elif last != NULL:
+                            current = last.next
+                        while current != NULL:
+                            for n in range(3):
+                                point[n] = 0.0
+                            for n in range(3):
+                                for m in range(3):
+                                    point[m] += (current.p[n][m]-pos[m])*idds[m]
+                            for n in range(3):
+                                point[n] /= 3.0
+                            current.val = offset_interpolate(dims, point,
+                                                             sdata + offset)
+                            last = current
+                            if current.next == NULL: break
+                            current = current.next
+                pos[2] += dds[2]
+            pos[1] += dds[1]
+        pos[0] += dds[0]
+    # Hallo, we are all done.
+    cdef np.ndarray[np.float64_t, ndim=2] vertices 
+    vertices = np.zeros((triangles.count*3,3), dtype='float64')
+    if do_sample == 1:
+        sampled = np.zeros(triangles.count, dtype='float64')
+        FillTriangleValues(sampled, triangles.first)
+        FillAndWipeTriangles(vertices, triangles.first)
+        return vertices, sampled
+    FillAndWipeTriangles(vertices, triangles.first)
+    return vertices
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def march_cubes_grid_flux(
+                     np.float64_t isovalue,
+                     np.ndarray[np.float64_t, ndim=3] values,
+                     np.ndarray[np.float64_t, ndim=3] v1,
+                     np.ndarray[np.float64_t, ndim=3] v2,
+                     np.ndarray[np.float64_t, ndim=3] v3,
+                     np.ndarray[np.float64_t, ndim=3] flux_field,
+                     np.ndarray[np.int32_t, ndim=3] mask,
+                     np.ndarray[np.float64_t, ndim=1] left_edge,
+                     np.ndarray[np.float64_t, ndim=1] dxs):
+    cdef int dims[3]
+    cdef int i, j, k, n, m
+    cdef int offset
+    cdef np.float64_t gv[8]
+    cdef np.float64_t *intdata = NULL
+    cdef TriangleCollection triangles
+    cdef Triangle *current = NULL
+    cdef Triangle *last = NULL
+    cdef np.float64_t *data = <np.float64_t *> values.data
+    cdef np.float64_t *v1data = <np.float64_t *> v1.data
+    cdef np.float64_t *v2data = <np.float64_t *> v2.data
+    cdef np.float64_t *v3data = <np.float64_t *> v3.data
+    cdef np.float64_t *fdata = <np.float64_t *> flux_field.data
+    cdef np.float64_t *dds = <np.float64_t *> dxs.data
+    cdef np.float64_t flux = 0.0
+    cdef np.float64_t center[3], point[3], wval, temp, area, s
+    cdef np.float64_t cell_pos[3], fv[3], idds[3], normal[3]
+    for i in range(3):
+        dims[i] = values.shape[i] - 1
+        idds[i] = 1.0 / dds[i]
+    triangles.first = triangles.current = NULL
+    triangles.count = 0
+    cell_pos[0] = left_edge[0]
+    for i in range(dims[0]):
+        cell_pos[1] = left_edge[1]
+        for j in range(dims[1]):
+            cell_pos[2] = left_edge[2]
+            for k in range(dims[2]):
+                if mask[i,j,k] == 1:
+                    offset = i * (dims[1] + 1) * (dims[2] + 1) \
+                           + j * (dims[2] + 1) + k
+                    intdata = data + offset
+                    offset_fill(dims, intdata, gv)
+                    march_cubes(gv, isovalue, dds,
+                                cell_pos[0], cell_pos[1], cell_pos[2],
+                                &triangles)
+                    # Now our triangles collection has a bunch.  We now
+                    # calculate fluxes for each.
+                    if last == NULL and triangles.first != NULL:
+                        current = triangles.first
+                        last = NULL
+                    elif last != NULL:
+                        current = last.next
+                    while current != NULL:
+                        # Calculate the center of the triangle
+                        wval = 0.0
+                        for n in range(3):
+                            center[n] = 0.0
+                        for n in range(3):
+                            for m in range(3):
+                                point[m] = (current.p[n][m]-cell_pos[m])*idds[m]
+                            # Now we calculate the value at this point
+                            temp = offset_interpolate(dims, point, intdata)
+                            #print "something", temp, point[0], point[1], point[2]
+                            wval += temp
+                            for m in range(3):
+                                center[m] += temp * point[m]
+                        # Now we divide by our normalizing factor
+                        for n in range(3):
+                            center[n] /= wval
+                        # We have our center point of the triangle, in 0..1
+                        # coordinates.  So now we interpolate our three
+                        # fields.
+                        fv[0] = offset_interpolate(dims, center, v1data + offset)
+                        fv[1] = offset_interpolate(dims, center, v2data + offset)
+                        fv[2] = offset_interpolate(dims, center, v3data + offset)
+                        # We interpolate again the actual value data
+                        wval = offset_interpolate(dims, center, fdata + offset)
+                        # Now we have our flux vector and our field value!
+                        # We just need a normal vector with which we can
+                        # dot it.  The normal should be equal to the gradient
+                        # in the center of the triangle, or thereabouts.
+                        eval_gradient(dims, center, intdata, normal)
+                        temp = 0.0
+                        for n in range(3):
+                            temp += normal[n]*normal[n]
+                        # Take the negative, to ensure it points inwardly
+                        temp = -(temp**0.5)
+                        # Dump this somewhere for now
+                        temp = wval * (fv[0] * normal[0] +
+                                       fv[1] * normal[1] +
+                                       fv[2] * normal[2])/temp
+                        # Now we need the area of the triangle.  This will take
+                        # a lot of time to calculate compared to the rest.
+                        # We use Heron's formula.
+                        for n in range(3):
+                            fv[n] = 0.0
+                        for n in range(3):
+                            fv[0] += (current.p[0][n] - current.p[2][n])**2.0
+                            fv[1] += (current.p[1][n] - current.p[0][n])**2.0
+                            fv[2] += (current.p[2][n] - current.p[1][n])**2.0
+                        s = 0.0
+                        for n in range(3):
+                            fv[n] = fv[n]**0.5
+                            s += 0.5 * fv[n]
+                        area = (s*(s-fv[0])*(s-fv[1])*(s-fv[2]))
+                        area = area**0.5
+                        flux += temp*area
+                        last = current
+                        if current.next == NULL: break
+                        current = current.next
+                cell_pos[2] += dds[2]
+            cell_pos[1] += dds[1]
+        cell_pos[0] += dds[0]
+    # Hallo, we are all done.
+    WipeTriangles(triangles.first)
+    return flux
+


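For each triangle, march_cubes_grid_flux combines an interpolated field value, a velocity interpolated at the triangle center, a unit normal taken from the field gradient (negated so it points inward), and the triangle area from Heron's formula. A condensed NumPy sketch of that per-triangle term, with the trilinear interpolation and gradient evaluation replaced by precomputed inputs, is:

import numpy as np

def triangle_flux(p, velocity, normal, field_value):
    # p           : (3, 3) array of vertex coordinates
    # velocity    : (3,) velocity interpolated at the triangle center
    # normal      : (3,) gradient of the contoured field at the center
    # field_value : scalar field value at the center
    n_hat = normal / -np.sqrt(np.dot(normal, normal))  # inward unit normal
    v_dot_n = np.dot(velocity, n_hat)
    # Heron's formula for the triangle area.
    a = np.linalg.norm(p[0] - p[2])
    b = np.linalg.norm(p[1] - p[0])
    c = np.linalg.norm(p[2] - p[1])
    s = 0.5 * (a + b + c)
    area = np.sqrt(s * (s - a) * (s - b) * (s - c))
    return field_value * v_dot_n * area

tri = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
print(triangle_flux(tri, velocity=np.array([0.0, 0.0, 2.0]),
                    normal=np.array([0.0, 0.0, -1.0]), field_value=1.5))
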
diff -r d91a8b84ec5ab4eeedda6502baf307da4b7eca12 -r de26d4dcfc5c656de04f6b2279ba29ca0d129a6a yt/utilities/_amr_utils/setup.py
--- a/yt/utilities/_amr_utils/setup.py
+++ b/yt/utilities/_amr_utils/setup.py
@@ -126,10 +126,20 @@
                 depends=["yt/utilities/_amr_utils/freetype_includes.h"])
     config.add_extension("geometry_utils", 
                 ["yt/utilities/_amr_utils/geometry_utils.pyx"],
+               extra_compile_args=['-fopenmp'],
+               extra_link_args=['-fopenmp'],
                 libraries=["m"], depends=["yt/utilities/_amr_utils/fp_utils.pxd"])
     config.add_extension("Interpolators", 
                 ["yt/utilities/_amr_utils/Interpolators.pyx"],
                 libraries=["m"], depends=["yt/utilities/_amr_utils/fp_utils.pxd"])
+    config.add_extension("marching_cubes", 
+                ["yt/utilities/_amr_utils/marching_cubes.pyx",
+                 "yt/utilities/_amr_utils/FixedInterpolator.c"],
+                libraries=["m"],
+                depends=["yt/utilities/_amr_utils/fp_utils.pxd",
+                         "yt/utilities/_amr_utils/fixed_interpolator.pxd",
+                         "yt/utilities/_amr_utils/FixedInterpolator.h",
+                ])
     config.add_extension("misc_utilities", 
                 ["yt/utilities/_amr_utils/misc_utilities.pyx"],
                 libraries=["m"], depends=["yt/utilities/_amr_utils/fp_utils.pxd"])
@@ -175,14 +185,17 @@
           )
     config.add_extension("grid_traversal", 
                ["yt/utilities/_amr_utils/grid_traversal.pyx",
-                "yt/utilities/_amr_utils/FixedInterpolator.c"],
+                "yt/utilities/_amr_utils/FixedInterpolator.c",
+                "yt/utilities/_amr_utils/kdtree.c"],
                include_dirs=["yt/utilities/_amr_utils/"],
                libraries=["m"], 
                extra_compile_args=['-fopenmp'],
                extra_link_args=['-fopenmp'],
                depends = ["yt/utilities/_amr_utils/VolumeIntegrator.pyx",
                           "yt/utilities/_amr_utils/fp_utils.pxd",
+                          "yt/utilities/_amr_utils/kdtree.h",
                           "yt/utilities/_amr_utils/FixedInterpolator.h",
+                          "yt/utilities/_amr_utils/fixed_interpolator.pxd",
                           ]
           )
     if os.environ.get("GPERFTOOLS", "no").upper() != "NO":


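The pattern used throughout this setup.py is a numpy.distutils Configuration to which each Cython module is added, with OpenMP enabled through matching compile and link flags. Reduced to a standalone sketch (the module and file names below are placeholders, not part of yt):

from numpy.distutils.misc_util import Configuration

def configuration(parent_package='', top_path=None):
    config = Configuration('_amr_utils', parent_package, top_path)
    # A Cython extension plus a supporting C file, linked against libm and
    # compiled with OpenMP so prange() loops can run threaded.
    config.add_extension("example_module",                    # placeholder
                ["example_module.pyx", "support.c"],          # placeholders
                libraries=["m"],
                extra_compile_args=['-fopenmp'],
                extra_link_args=['-fopenmp'],
                depends=["support.h"])
    return config

if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(configuration=configuration)
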
diff -r d91a8b84ec5ab4eeedda6502baf307da4b7eca12 -r de26d4dcfc5c656de04f6b2279ba29ca0d129a6a yt/utilities/amr_utils.py
--- a/yt/utilities/amr_utils.py
+++ b/yt/utilities/amr_utils.py
@@ -36,4 +36,6 @@
 from ._amr_utils.PointsInVolume import *
 from ._amr_utils.QuadTree import *
 from ._amr_utils.RayIntegrators import *
-from ._amr_utils.VolumeIntegrator import *
+from ._amr_utils.grid_traversal import *
+from ._amr_utils.marching_cubes import *
+#from ._amr_utils.VolumeIntegrator import *


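With VolumeIntegrator commented out, downstream code is expected to go through this aggregator module; a sketch of the resulting import surface (assuming the extensions have been built) is:

# yt.utilities.amr_utils re-exports the per-module Cython symbols, so callers
# do not need to know which .pyx file a given routine lives in.
from yt.utilities.amr_utils import march_cubes_grid, march_cubes_grid_flux
from yt.utilities.amr_utils import PartitionedGrid, VolumeRenderSampler
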
diff -r d91a8b84ec5ab4eeedda6502baf307da4b7eca12 -r de26d4dcfc5c656de04f6b2279ba29ca0d129a6a yt/visualization/volume_rendering/api.py
--- a/yt/visualization/volume_rendering/api.py
+++ b/yt/visualization/volume_rendering/api.py
@@ -32,8 +32,6 @@
                              PlanckTransferFunction, \
                              MultiVariateTransferFunction, \
                              ProjectionTransferFunction
-from yt.utilities.amr_utils import PartitionedGrid, VectorPlane, \
-    TransferFunctionProxy
 from grid_partitioner import HomogenizedVolume, \
                              export_partitioned_grids, \
                              import_partitioned_grids


diff -r d91a8b84ec5ab4eeedda6502baf307da4b7eca12 -r de26d4dcfc5c656de04f6b2279ba29ca0d129a6a yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -30,9 +30,9 @@
 from .grid_partitioner import HomogenizedVolume
 from .transfer_functions import ProjectionTransferFunction
 
-from yt.utilities.amr_utils import TransferFunctionProxy, VectorPlane, \
-    arr_vec2pix_nest, arr_pix2vec_nest, AdaptiveRaySource, \
-    arr_ang2pix_nest
+#from yt.utilities.amr_utils import \
+#    arr_vec2pix_nest, arr_pix2vec_nest, AdaptiveRaySource, \
+#    arr_ang2pix_nest
 from yt.visualization.image_writer import write_bitmap
 from yt.data_objects.data_containers import data_object_registry
 from yt.utilities.parallel_tools.parallel_analysis_interface import \



https://bitbucket.org/yt_analysis/yt/changeset/670b94f4d158/
changeset:   670b94f4d158
branch:      yt
user:        MatthewTurk
date:        2011-12-22 17:51:35
summary:     Beginning to fix up the HEALpix sampling.
affected #:  3 files

diff -r de26d4dcfc5c656de04f6b2279ba29ca0d129a6a -r 670b94f4d1581dbac24ac59d791fc64f8e9c32c2 yt/utilities/_amr_utils/grid_traversal.pyx
--- a/yt/utilities/_amr_utils/grid_traversal.pyx
+++ b/yt/utilities/_amr_utils/grid_traversal.pyx
@@ -744,3 +744,92 @@
         if enter_t >= 1.0: break
     if return_t != NULL: return_t[0] = exit_t
     return hit
+
+def hp_pix2vec_nest(long nside, long ipix):
+    cdef double v[3]
+    healpix_interface.pix2vec_nest(nside, ipix, v)
+    cdef np.ndarray[np.float64_t, ndim=1] tr = np.empty((3,), dtype='float64')
+    tr[0] = v[0]
+    tr[1] = v[1]
+    tr[2] = v[2]
+    return tr
+
+def arr_pix2vec_nest(long nside,
+                     np.ndarray[np.int64_t, ndim=1] aipix):
+    cdef int n = aipix.shape[0]
+    cdef int i
+    cdef double v[3]
+    cdef long ipix
+    cdef np.ndarray[np.float64_t, ndim=2] tr = np.zeros((n, 3), dtype='float64')
+    for i in range(n):
+        ipix = aipix[i]
+        healpix_interface.pix2vec_nest(nside, ipix, v)
+        tr[i,0] = v[0]
+        tr[i,1] = v[1]
+        tr[i,2] = v[2]
+    return tr
+
+def hp_vec2pix_nest(long nside, double x, double y, double z):
+    cdef double v[3]
+    v[0] = x
+    v[1] = y
+    v[2] = z
+    cdef long ipix
+    healpix_interface.vec2pix_nest(nside, v, &ipix)
+    return ipix
+
+def arr_vec2pix_nest(long nside,
+                     np.ndarray[np.float64_t, ndim=1] x,
+                     np.ndarray[np.float64_t, ndim=1] y,
+                     np.ndarray[np.float64_t, ndim=1] z):
+    cdef int n = x.shape[0]
+    cdef int i
+    cdef double v[3]
+    cdef long ipix
+    cdef np.ndarray[np.int64_t, ndim=1] tr = np.zeros(n, dtype='int64')
+    for i in range(n):
+        v[0] = x[i]
+        v[1] = y[i]
+        v[2] = z[i]
+        healpix_interface.vec2pix_nest(nside, v, &ipix)
+        tr[i] = ipix
+    return tr
+
+def hp_pix2ang_nest(long nside, long ipnest):
+    cdef double theta, phi
+    healpix_interface.pix2ang_nest(nside, ipnest, &theta, &phi)
+    return (theta, phi)
+
+def arr_pix2ang_nest(long nside, np.ndarray[np.int64_t, ndim=1] aipnest):
+    cdef int n = aipnest.shape[0]
+    cdef int i
+    cdef long ipnest
+    cdef np.ndarray[np.float64_t, ndim=2] tr = np.zeros((n, 2), dtype='float64')
+    cdef double theta, phi
+    for i in range(n):
+        ipnest = aipnest[i]
+        healpix_interface.pix2ang_nest(nside, ipnest, &theta, &phi)
+        tr[i,0] = theta
+        tr[i,1] = phi
+    return tr
+
+def hp_ang2pix_nest(long nside, double theta, double phi):
+    cdef long ipix
+    healpix_interface.ang2pix_nest(nside, theta, phi, &ipix)
+    return ipix
+
+def arr_ang2pix_nest(long nside,
+                     np.ndarray[np.float64_t, ndim=1] atheta,
+                     np.ndarray[np.float64_t, ndim=1] aphi):
+    cdef int n = atheta.shape[0]
+    cdef int i
+    cdef long ipnest
+    cdef np.ndarray[np.int64_t, ndim=1] tr = np.zeros(n, dtype='int64')
+    cdef double theta, phi
+    for i in range(n):
+        theta = atheta[i]
+        phi = aphi[i]
+        healpix_interface.ang2pix_nest(nside, theta, phi, &ipnest)
+        tr[i] = ipnest
+    return tr
+


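These wrappers expose the HEALpix NESTED-scheme conversions to Python; for an nside that is a power of two there are 12*nside**2 pixels, and converting a pixel index to its center vector and back should recover the same index. A small check of that expectation (assuming the compiled grid_traversal module is importable) is:

import numpy as na
from yt.utilities._amr_utils.grid_traversal import \
    arr_pix2vec_nest, arr_vec2pix_nest

nside = 4
nv = 12 * nside**2                      # one entry per HEALpix pixel
pix = na.arange(nv, dtype='int64')
vec = arr_pix2vec_nest(nside, pix)      # (nv, 3) unit vectors
back = arr_vec2pix_nest(nside, vec[:, 0], vec[:, 1], vec[:, 2])
assert (back == pix).all()              # NESTED indexing round-trips
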
diff -r de26d4dcfc5c656de04f6b2279ba29ca0d129a6a -r 670b94f4d1581dbac24ac59d791fc64f8e9c32c2 yt/utilities/_amr_utils/setup.py
--- a/yt/utilities/_amr_utils/setup.py
+++ b/yt/utilities/_amr_utils/setup.py
@@ -186,7 +186,8 @@
     config.add_extension("grid_traversal", 
                ["yt/utilities/_amr_utils/grid_traversal.pyx",
                 "yt/utilities/_amr_utils/FixedInterpolator.c",
-                "yt/utilities/_amr_utils/kdtree.c"],
+                "yt/utilities/_amr_utils/kdtree.c"] +
+                 glob.glob("yt/utilities/_amr_utils/healpix_*.c"), 
                include_dirs=["yt/utilities/_amr_utils/"],
                libraries=["m"], 
                extra_compile_args=['-fopenmp'],


diff -r de26d4dcfc5c656de04f6b2279ba29ca0d129a6a -r 670b94f4d1581dbac24ac59d791fc64f8e9c32c2 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -40,8 +40,9 @@
 from yt.utilities.amr_kdtree.api import AMRKDTree
 from numpy import pi
 
-from yt.utilities._amr_utils.grid_traversal import \
-    PartitionedGrid, ProjectionSampler, VolumeRenderSampler
+from yt.utilities.amr_utils import \
+    PartitionedGrid, ProjectionSampler, VolumeRenderSampler, \
+    arr_vec2pix_nest, arr_pix2vec_nest, arr_ang2pix_nest
 
 class Camera(ParallelAnalysisInterface):
     def __init__(self, center, normal_vector, width,
@@ -879,3 +880,112 @@
         image /= vals[:,:,1]
         pf.field_info.pop("temp_weightfield")
     return image
+
+def allsky_projection(pf, center, radius, nside, field, weight = None):
+    r"""Project through a parameter file, off-axis, and return the image plane.
+
+    This function will accept the necessary items to integrate through a volume
+    at an arbitrary angle and return the integrated field of view to the user.
+    Note that if a weight is supplied, it will multiply the pre-interpolated
+    values together, then create cell-centered values, then interpolate within
+    the cell to conduct the integration.
+
+    Parameters
+    ----------
+    pf : `~yt.data_objects.api.StaticOutput`
+        This is the parameter file to volume render.
+    center : array_like
+        The current "center" of the view port -- the focal point for the
+        camera.
+    normal_vector : array_like
+        The vector between the camera position and the center.
+    width : float or list of floats
+        The current width of the image.  If a single float, the volume is
+        cubical, but if not, it is front/back, left/right, top/bottom.
+    resolution : int or list of ints
+        The number of pixels in each direction.
+    field : string
+        The field to project through the volume
+    weight : optional, default None
+        If supplied, the field will be pre-multiplied by this, then divided by
+        the integrated value of this field.  This returns an average rather
+        than a sum.
+    volume : `yt.extensions.volume_rendering.HomogenizedVolume`, optional
+        The volume to ray cast through.  Can be specified for finer-grained
+        control, but otherwise will be automatically generated.
+    no_ghost: bool, optional
+        Optimization option.  If True, homogenized bricks will
+        extrapolate out from grid instead of interpolating from
+        ghost zones that have to first be calculated.  This can
+        lead to large speed improvements, but at a loss of
+        accuracy/smoothness in resulting image.  The effects are
+        less notable when the transfer function is smooth and
+        broad. Default: True
+
+    Returns
+    -------
+    image : array
+        An (N,N) array of the final integrated values, in float64 form.
+
+    Examples
+    --------
+
+    >>> image = off_axis_projection(pf, [0.5, 0.5, 0.5], [0.2,0.3,0.4],
+                      0.2, N, "Temperature", "Density")
+    >>> write_image(na.log10(image), "offaxis.png")
+
+    """
+    # We manually modify the ProjectionTransferFunction to get it to work the
+    # way we want, with a second field that's also passed through.
+    fields = [field]
+    if weight is not None:
+        # This is a temporary field, which we will remove at the end.
+        pf.field_info.add_field("temp_weightfield",
+            function=lambda a,b:b[field]*b[weight])
+        fields = ["temp_weightfield", weight]
+    nv = 12*nside**2
+    image = na.zeros((nv,1,3), dtype='float64', order='C')
+    vs = arr_pix2vec_nest(nside, na.arange(nv))
+    vs *= radius
+    vs.shape = (nv,1,3)
+    uv = na.ones(3, dtype='float64')
+    positions = na.ones((nv, 1, 3), dtype='float64') * center
+    grids = pf.h.sphere(center, radius)._grids
+    sampler = ProjectionSampler(positions, vs, center, (0.0, 0.0, 0.0, 0.0),
+                                image, uv, uv, na.zeros(3, dtype='float64'))
+    pb = get_pbar("Sampling ", len(grids))
+    for i,grid in enumerate(grids):
+        data = [(grid["Density"] * grid.child_mask).astype("float64")]
+        pg = PartitionedGrid(
+            grid.id, data,
+            grid.LeftEdge, grid.RightEdge, grid.ActiveDimensions.astype("int64"))
+        grid.clear_data()
+        sampler(pg)
+        pb.update(i)
+    pb.finish()
+    image = sampler.aimage
+    return image
+    if weight is None:
+        dl = width * pf.units[pf.field_info[field].projection_conversion]
+        image *= dl
+    else:
+        image /= vals[:,:,1]
+        pf.field_info.pop("temp_weightfield")
+    return image
+
+def plot_allsky_healpix(image, nside, fn, label = ""):
+    import matplotlib.figure
+    import matplotlib.backends.backend_agg
+    phi, theta = na.mgrid[0.0:2*pi:800j, 0:pi:800j]
+    pixi = arr_ang2pix_nest(nside, theta.ravel(), phi.ravel())
+    img = na.log10(image[:,0,0][pixi]).reshape((800,800))
+
+    fig = matplotlib.figure.Figure((10, 5))
+    ax = fig.add_subplot(1,1,1,projection='mollweide')
+    implot = ax.imshow(img, extent=(-pi,pi,-pi/2,pi/2), clip_on=False, aspect=0.5)
+    cb = fig.colorbar(implot, orientation='horizontal')
+    cb.set_label(label)
+    ax.xaxis.set_ticks(())
+    ax.yaxis.set_ticks(())
+    canvas = matplotlib.backends.backend_agg.FigureCanvasAgg(fig)
+    canvas.print_figure(fn)



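Taken together, the two new functions give an all-sky projection workflow along the following lines (a sketch assuming a loaded parameter file; the dataset name is a placeholder, and the same call pattern appears in the docstring added in the next changeset):

from yt.mods import *   # provides load()
from yt.visualization.volume_rendering.camera import \
    allsky_projection, plot_allsky_healpix

pf = load("DD0010/DD0010")            # placeholder dataset
nside = 32                            # 12 * 32**2 = 12288 rays
image = allsky_projection(pf, [0.5, 0.5, 0.5], 1.0 / pf['mpc'],
                          nside, "Temperature", weight="Density")
plot_allsky_healpix(image, nside, "healpix.png", label="Temperature")
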
https://bitbucket.org/yt_analysis/yt/changeset/402768a29a7c/
changeset:   402768a29a7c
branch:      yt
user:        MatthewTurk
date:        2011-12-22 19:44:46
summary:     Fixing HEALpix ray tracing with threaded volume renderer.  Scales pretty
poorly.
affected #:  2 files

diff -r 670b94f4d1581dbac24ac59d791fc64f8e9c32c2 -r 402768a29a7cca46f7657122a60a399321c67b72 yt/utilities/_amr_utils/grid_traversal.pyx
--- a/yt/utilities/_amr_utils/grid_traversal.pyx
+++ b/yt/utilities/_amr_utils/grid_traversal.pyx
@@ -229,17 +229,30 @@
         if self.sampler == NULL: raise RuntimeError
         cdef np.float64_t *v_pos, *v_dir, rgba[6], extrema[4]
         hit = 0
-        self.calculate_extent(extrema, vc)
-        self.get_start_stop(extrema, iter)
-        iter[0] = iclip(iter[0]-1, 0, im.nv[0]-1)
-        iter[1] = iclip(iter[1]+1, 0, im.nv[0]-1)
-        iter[2] = iclip(iter[2]-1, 0, im.nv[1]-1)
-        iter[3] = iclip(iter[3]+1, 0, im.nv[1]-1)
+        cdef int nx, ny, size
+        if im.vd_strides[0] == -1:
+            self.calculate_extent(extrema, vc)
+            self.get_start_stop(extrema, iter)
+            iter[0] = iclip(iter[0]-1, 0, im.nv[0]-1)
+            iter[1] = iclip(iter[1]+1, 0, im.nv[0]-1)
+            iter[2] = iclip(iter[2]-1, 0, im.nv[1]-1)
+            iter[3] = iclip(iter[3]+1, 0, im.nv[1]-1)
+            nx = (iter[1] - iter[0])
+            ny = (iter[3] - iter[2])
+            size = nx * ny
+        else:
+            nx = im.nv[0]
+            ny = 1
+            iter[0] = iter[1] = iter[2] = iter[3] = 0
+            size = nx
+        #print "Sampling", im.vd_strides[0], size
+        #print im.vp_dir[0], im.vp_dir[3]
+        #print im.vp_pos[0], im.vp_pos[3]
+        #print im.image[0], im.image[3]
+        #print im.vp_dir[(size-1) * 3], im.vp_dir[size * 3 - 1]
+        #print im.vp_pos[(size-1) * 3], im.vp_pos[size * 3 - 1]
+        #print im.image[(size-1) * 3], im.image[size * 3 - 1]
         cdef ImageAccumulator *idata
-        cdef void *data
-        cdef int nx = (iter[1] - iter[0])
-        cdef int ny = (iter[3] - iter[2])
-        cdef int size = nx * ny
         cdef np.float64_t px, py 
         cdef np.float64_t width[3] 
         for i in range(3):
@@ -270,19 +283,17 @@
                 # our rays 
                 v_dir = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
                 for j in prange(size, schedule="dynamic"):
-                    vj = j % ny
-                    vi = (j - vj) / ny + iter[0]
-                    vj = vj + iter[2]
-                    offset = im.vp_strides[0] * vi + im.vp_strides[1] * vj
-                    for i in range(3): v_pos[i] = im.vp_dir[i + offset]
-                    offset = im.im_strides[0] * vi + im.im_strides[1] * vj
+                    offset = j * 3
+                    for i in range(3): v_pos[i] = im.vp_pos[i + offset]
+                    for i in range(3): v_dir[i] = im.vp_dir[i + offset]
                     for i in range(3): idata.rgba[i] = im.image[i + offset]
-                    offset = im.vd_strides[0] * vi + im.vd_strides[1] * vj
-                    for i in range(3): v_dir[i] = im.vp_dir[i + offset]
-                    walk_volume(vc, v_pos, v_dir, self.sampler, data)
-                free(v_dir)
+                    walk_volume(vc, v_pos, v_dir, self.sampler, 
+                                (<void *> idata))
+                    for i in range(3): im.image[i + offset] = idata.rgba[i]
+                #free(v_dir)
             free(idata)
             free(v_pos)
+        #print self.aimage.max()
         return hit
 
 cdef void projection_sampler(


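The rewritten loop assumes the ray position, direction, and image arrays are C-ordered with shape (nv, 1, 3), so all three components of ray j sit at flat offset j*3. A quick NumPy check of that layout assumption:

import numpy as np

nv = 12 * 4**2                          # e.g. one ray per HEALpix pixel
vp_pos = np.random.random((nv, 1, 3))   # C-ordered, as built in camera.py
flat = vp_pos.ravel(order='C')

j = 7
offset = j * 3                          # matches "offset = j * 3" above
assert np.allclose(flat[offset:offset + 3], vp_pos[j, 0, :])
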
diff -r 670b94f4d1581dbac24ac59d791fc64f8e9c32c2 -r 402768a29a7cca46f7657122a60a399321c67b72 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -882,13 +882,13 @@
     return image
 
 def allsky_projection(pf, center, radius, nside, field, weight = None):
-    r"""Project through a parameter file, off-axis, and return the image plane.
+    r"""Project through a parameter file, through an allsky-method
+    decomposition from HEALpix, and return the image plane.
 
     This function will accept the necessary items to integrate through a volume
-    at an arbitrary angle and return the integrated field of view to the user.
-    Note that if a weight is supplied, it will multiply the pre-interpolated
-    values together, then create cell-centered values, then interpolate within
-    the cell to conduct the integration.
+    over 4pi and return the integrated field of view to the user.  Note that if
+    a weight is supplied, it will multiply the pre-interpolated values
+    together.
 
     Parameters
     ----------
@@ -897,47 +897,35 @@
     center : array_like
         The current "center" of the view port -- the focal point for the
         camera.
-    normal_vector : array_like
-        The vector between the camera position and the center.
-    width : float or list of floats
-        The current width of the image.  If a single float, the volume is
-        cubical, but if not, it is front/back, left/right, top/bottom.
-    resolution : int or list of ints
-        The number of pixels in each direction.
+    radius : float or list of floats
+        The radius out to which the integration is performed.
+    nside : int
+        The HEALpix degree.  The number of rays integrated is 12*(Nside**2).
+        Must be a power of two!
     field : string
         The field to project through the volume
     weight : optional, default None
         If supplied, the field will be pre-multiplied by this, then divided by
         the integrated value of this field.  This returns an average rather
         than a sum.
-    volume : `yt.extensions.volume_rendering.HomogenizedVolume`, optional
-        The volume to ray cast through.  Can be specified for finer-grained
-        control, but otherwise will be automatically generated.
-    no_ghost: bool, optional
-        Optimization option.  If True, homogenized bricks will
-        extrapolate out from grid instead of interpolating from
-        ghost zones that have to first be calculated.  This can
-        lead to large speed improvements, but at a loss of
-        accuracy/smoothness in resulting image.  The effects are
-        less notable when the transfer function is smooth and
-        broad. Default: True
 
     Returns
     -------
     image : array
-        An (N,N) array of the final integrated values, in float64 form.
+        An ((Nside**2)*12,1,3) array of the final integrated values, in float64 form.
 
     Examples
     --------
 
-    >>> image = off_axis_projection(pf, [0.5, 0.5, 0.5], [0.2,0.3,0.4],
-                      0.2, N, "Temperature", "Density")
-    >>> write_image(na.log10(image), "offaxis.png")
+    >>> image = allsky_projection(pf, [0.5, 0.5, 0.5], 1.0/pf['mpc'],
+                      32, "Temperature", "Density")
+    >>> plot_allsky_healpix(image, 32, "healpix.png")
 
     """
     # We manually modify the ProjectionTransferFunction to get it to work the
     # way we want, with a second field that's also passed through.
     fields = [field]
+    center = na.array(center, dtype='float64')
     if weight is not None:
         # This is a temporary field, which we will remove at the end.
         pf.field_info.add_field("temp_weightfield",
@@ -955,7 +943,8 @@
                                 image, uv, uv, na.zeros(3, dtype='float64'))
     pb = get_pbar("Sampling ", len(grids))
     for i,grid in enumerate(grids):
-        data = [(grid["Density"] * grid.child_mask).astype("float64")]
+        data = [(grid[field] * grid.child_mask).astype("float64")
+                for field in fields]
         pg = PartitionedGrid(
             grid.id, data,
             grid.LeftEdge, grid.RightEdge, grid.ActiveDimensions.astype("int64"))
@@ -964,9 +953,8 @@
         pb.update(i)
     pb.finish()
     image = sampler.aimage
-    return image
     if weight is None:
-        dl = width * pf.units[pf.field_info[field].projection_conversion]
+        dl = radius * pf.units[pf.field_info[field].projection_conversion]
         image *= dl
     else:
         image /= vals[:,:,1]
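
The buffer shape quoted in the docstring follows directly from the HEALpix pixelization: an order-nside map has 12*nside**2 pixels, and one ray is cast per pixel.  A minimal NumPy sketch of that bookkeeping (the nside value here is illustrative, not taken from the changeset):

    import numpy as np

    nside = 32                      # must be a power of two
    nv = 12 * nside ** 2            # one ray per HEALpix pixel
    image = np.zeros((nv, 1, 3), dtype="float64")
    print(nv, image.shape)          # 12288 (12288, 1, 3)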



https://bitbucket.org/yt_analysis/yt/changeset/8b310372f2f9/
changeset:   8b310372f2f9
branch:      yt
user:        MatthewTurk
date:        2011-12-23 16:56:26
summary:     Adding first pass at re-doing the off_axis_projection to use the sampler
method.
affected #:  1 file

diff -r 402768a29a7cca46f7657122a60a399321c67b72 -r 8b310372f2f9852c023e612dbba0ca0898f9eeeb yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -26,6 +26,7 @@
 import numpy as na
 
 from yt.funcs import *
+from yt.utilities.math_utils import *
 
 from .grid_partitioner import HomogenizedVolume
 from .transfer_functions import ProjectionTransferFunction
@@ -803,7 +804,7 @@
         return (left_camera, right_camera)
 
 def off_axis_projection(pf, center, normal_vector, width, resolution,
-                        field, weight = None, volume = None, no_ghost = True):
+                        field, weight = None):
     r"""Project through a parameter file, off-axis, and return the image plane.
 
     This function will accept the necessary items to integrate through a volume
@@ -832,17 +833,6 @@
         If supplied, the field will be pre-multiplied by this, then divided by
         the integrated value of this field.  This returns an average rather
         than a sum.
-    volume : `yt.extensions.volume_rendering.HomogenizedVolume`, optional
-        The volume to ray cast through.  Can be specified for finer-grained
-        control, but otherwise will be automatically generated.
-    no_ghost: bool, optional
-        Optimization option.  If True, homogenized bricks will
-        extrapolate out from grid instead of interpolating from
-        ghost zones that have to first be calculated.  This can
-        lead to large speed improvements, but at a loss of
-        accuracy/smoothness in resulting image.  The effects are
-        less notable when the transfer function is smooth and
-        broad. Default: True
 
     Returns
     -------
@@ -865,14 +855,45 @@
         pf.field_info.add_field("temp_weightfield",
             function=lambda a,b:b[field]*b[weight])
         fields = ["temp_weightfield", weight]
-        tf = ProjectionTransferFunction(n_fields = 2)
-    tf = ProjectionTransferFunction(n_fields = len(fields))
-    cam = pf.h.camera(center, normal_vector, width, resolution, tf,
-                      fields = fields,
-                      log_fields = [False] * len(fields),
-                      volume = volume, no_ghost = no_ghost)
-    vals = cam.snapshot()
-    image = vals[:,:,0]
+    image = na.zeros((resolution, resolution, len(fields)), dtype='float64',
+                      order='C')
+    normal_vector, north_vector, east_vector = ortho_find(normal_vector)
+    unit_vectors = [north_vector, east_vector, normal_vector]
+    back_center= center - 0.5*width * normal_vector
+    rotp = na.concatenate([na.linalg.pinv(unit_vectors).ravel('F'),
+                           back_center])
+    sampler = ProjectionSampler(
+        rotp, normal_vector * width, back_center,
+        (-width/2, width/2, -width/2, width/2),
+        image, north_vector, east_vector,
+        na.array([width, width, width], dtype='float64'))
+    # Calculate the eight corners of the box
+    # Back corners ...
+    mi = pf.domain_right_edge.copy()
+    ma = pf.domain_left_edge.copy()
+    for off1 in [-1, 1]:
+        for off2 in [-1, 1]:
+            for off3 in [-1, 1]:
+                this_point = (center + width/2.0 * off1 * north_vector
+                                     + width/2.0 * off2 * east_vector
+                                     + width/2.0 * off3 * normal_vector)
+                na.minimum(mi, this_point, mi)
+                na.maximum(ma, this_point, ma)
+    # Now we have a bounding box.
+    grids = pf.h.region(center, mi, ma)._grids
+    print len(grids), len(pf.h.grids)
+    pb = get_pbar("Sampling ", len(grids))
+    for i,grid in enumerate(grids):
+        data = [(grid[field] * grid.child_mask).astype("float64")
+                for field in fields]
+        pg = PartitionedGrid(
+            grid.id, data,
+            grid.LeftEdge, grid.RightEdge, grid.ActiveDimensions.astype("int64"))
+        grid.clear_data()
+        sampler(pg)
+        pb.update(i)
+    pb.finish()
+    image = sampler.aimage
     if weight is None:
         dl = width * pf.units[pf.field_info[field].projection_conversion]
         image *= dl
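
The eight-corner loop above is how the sampler-based off_axis_projection finds its bounding box: the view center is pushed out along every sign combination of the three basis vectors and the running minimum and maximum are kept.  A rough NumPy-only sketch of that step with made-up center, width, and normal values (the diff's ortho_find call returns an orthonormal basis; one is built by hand here):

    import numpy as np

    center = np.array([0.5, 0.5, 0.5])
    width = 0.2
    normal = np.array([0.2, 0.3, 0.4])
    normal /= np.linalg.norm(normal)
    east = np.cross(normal, [0.0, 0.0, 1.0])
    east /= np.linalg.norm(east)
    north = np.cross(east, normal)

    mi = np.full(3, np.inf)         # running minimum over the corners
    ma = np.full(3, -np.inf)        # running maximum over the corners
    for o1 in (-1, 1):
        for o2 in (-1, 1):
            for o3 in (-1, 1):
                corner = (center + 0.5 * width * o1 * north
                                 + 0.5 * width * o2 * east
                                 + 0.5 * width * o3 * normal)
                np.minimum(mi, corner, out=mi)
                np.maximum(ma, corner, out=ma)
    # mi, ma bound the region whose grids are handed to the ProjectionSampler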



https://bitbucket.org/yt_analysis/yt/changeset/d3069f699ee6/
changeset:   d3069f699ee6
branch:      yt
user:        MatthewTurk
date:        2011-12-23 18:24:13
summary:     There are a couple places where we assume rgba is 3, so setting it to
len(fields) causes breakages.  Oddly enough these breakages only seem to show up
with threading, likely because you have some threads working on vectors and
assigning incorrect values prior to the collection/setting of vectors in the
next sequence.  I guess?
affected #:  1 file

diff -r 8b310372f2f9852c023e612dbba0ca0898f9eeeb -r d3069f699ee60a8956b19e25661287dbb58e810c yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -855,7 +855,7 @@
         pf.field_info.add_field("temp_weightfield",
             function=lambda a,b:b[field]*b[weight])
         fields = ["temp_weightfield", weight]
-    image = na.zeros((resolution, resolution, len(fields)), dtype='float64',
+    image = na.zeros((resolution, resolution, 3), dtype='float64',
                       order='C')
     normal_vector, north_vector, east_vector = ortho_find(normal_vector)
     unit_vectors = [north_vector, east_vector, normal_vector]



https://bitbucket.org/yt_analysis/yt/changeset/5604b4fc94e0/
changeset:   5604b4fc94e0
branch:      yt
user:        MatthewTurk
date:        2011-12-23 19:33:54
summary:     Fixing up off-axis projection to work with weight fields correctly.
affected #:  2 files

diff -r d3069f699ee60a8956b19e25661287dbb58e810c -r 5604b4fc94e069a4c36b140e9fb9e680172bd6f4 yt/utilities/_amr_utils/grid_traversal.pyx
--- a/yt/utilities/_amr_utils/grid_traversal.pyx
+++ b/yt/utilities/_amr_utils/grid_traversal.pyx
@@ -233,10 +233,10 @@
         if im.vd_strides[0] == -1:
             self.calculate_extent(extrema, vc)
             self.get_start_stop(extrema, iter)
-            iter[0] = iclip(iter[0]-1, 0, im.nv[0]-1)
-            iter[1] = iclip(iter[1]+1, 0, im.nv[0]-1)
-            iter[2] = iclip(iter[2]-1, 0, im.nv[1]-1)
-            iter[3] = iclip(iter[3]+1, 0, im.nv[1]-1)
+            iter[0] = iclip(iter[0]-1, 0, im.nv[0])
+            iter[1] = iclip(iter[1]+1, 0, im.nv[0])
+            iter[2] = iclip(iter[2]-1, 0, im.nv[1])
+            iter[3] = iclip(iter[3]+1, 0, im.nv[1])
             nx = (iter[1] - iter[0])
             ny = (iter[3] - iter[2])
             size = nx * ny
@@ -245,13 +245,6 @@
             ny = 1
             iter[0] = iter[1] = iter[2] = iter[3] = 0
             size = nx
-        #print "Sampling", im.vd_strides[0], size
-        #print im.vp_dir[0], im.vp_dir[3]
-        #print im.vp_pos[0], im.vp_pos[3]
-        #print im.image[0], im.image[3]
-        #print im.vp_dir[(size-1) * 3], im.vp_dir[size * 3 - 1]
-        #print im.vp_pos[(size-1) * 3], im.vp_pos[size * 3 - 1]
-        #print im.image[(size-1) * 3], im.image[size * 3 - 1]
         cdef ImageAccumulator *idata
         cdef np.float64_t px, py 
         cdef np.float64_t width[3] 


diff -r d3069f699ee60a8956b19e25661287dbb58e810c -r 5604b4fc94e069a4c36b140e9fb9e680172bd6f4 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -852,8 +852,13 @@
     fields = [field]
     if weight is not None:
         # This is a temporary field, which we will remove at the end.
+        def _make_wf(f, w):
+            def temp_weightfield(a, b):
+                tr = b[f].astype("float64") * b[w]
+                return tr
+            return temp_weightfield
         pf.field_info.add_field("temp_weightfield",
-            function=lambda a,b:b[field]*b[weight])
+            function=_make_wf(field, weight))
         fields = ["temp_weightfield", weight]
     image = na.zeros((resolution, resolution, 3), dtype='float64',
                       order='C')
@@ -898,9 +903,9 @@
         dl = width * pf.units[pf.field_info[field].projection_conversion]
         image *= dl
     else:
-        image /= vals[:,:,1]
+        image[:,:,0] /= image[:,:,1]
         pf.field_info.pop("temp_weightfield")
-    return image
+    return image[:,:,0]
 
 def allsky_projection(pf, center, radius, nside, field, weight = None):
     r"""Project through a parameter file, through an allsky-method



https://bitbucket.org/yt_analysis/yt/changeset/f43257a218f3/
changeset:   f43257a218f3
branch:      yt
user:        MatthewTurk
date:        2011-12-23 21:37:32
summary:     Add a new healpix pixelization routine and fix up the healpix allsky projection
to allow for vector rotation.
affected #:  2 files

diff -r 5604b4fc94e069a4c36b140e9fb9e680172bd6f4 -r f43257a218f3b1077231819faad188b7371a6048 yt/utilities/_amr_utils/grid_traversal.pyx
--- a/yt/utilities/_amr_utils/grid_traversal.pyx
+++ b/yt/utilities/_amr_utils/grid_traversal.pyx
@@ -46,6 +46,10 @@
     double log2(double x) nogil
     long int lrint(double x) nogil
     double fabs(double x) nogil
+    double atan2(double y, double x) nogil
+    double acos(double x) nogil
+    double cos(double x) nogil
+    double sin(double x) nogil
 
 cdef struct VolumeContainer
 ctypedef void sample_function(
@@ -255,14 +259,15 @@
             idata = <ImageAccumulator *> malloc(sizeof(ImageAccumulator))
             idata.supp_data = self.supp_data
             v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+            v_dir = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
             if im.vd_strides[0] == -1:
                 for j in prange(size, schedule="dynamic"):
                     vj = j % ny
                     vi = (j - vj) / ny + iter[0]
                     vj = vj + iter[2]
                     # Dynamically calculate the position
-                    px = width[0] * (<float>vi)/(<float>im.nv[0]-1) - width[0]/2.0
-                    py = width[1] * (<float>vj)/(<float>im.nv[1]-1) - width[1]/2.0
+                    px = width[0] * (<np.float64_t>vi)/(<np.float64_t>im.nv[0]-1) - width[0]/2.0
+                    py = width[1] * (<np.float64_t>vj)/(<np.float64_t>im.nv[1]-1) - width[1]/2.0
                     v_pos[0] = im.vp_pos[0]*px + im.vp_pos[3]*py + im.vp_pos[9]
                     v_pos[1] = im.vp_pos[1]*px + im.vp_pos[4]*py + im.vp_pos[10]
                     v_pos[2] = im.vp_pos[2]*px + im.vp_pos[5]*py + im.vp_pos[11]
@@ -274,7 +279,6 @@
             else:
                 # If we do not have a simple image plane, we have to cast all
                 # our rays 
-                v_dir = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
                 for j in prange(size, schedule="dynamic"):
                     offset = j * 3
                     for i in range(3): v_pos[i] = im.vp_pos[i + offset]
@@ -283,7 +287,7 @@
                     walk_volume(vc, v_pos, v_dir, self.sampler, 
                                 (<void *> idata))
                     for i in range(3): im.image[i + offset] = idata.rgba[i]
-                #free(v_dir)
+            free(v_dir)
             free(idata)
             free(v_pos)
         #print self.aimage.max()
@@ -837,3 +841,51 @@
         tr[i] = ipnest
     return tr
 
+#@cython.boundscheck(False)
+@cython.cdivision(False)
+#@cython.wraparound(False)
+def pixelize_healpix(long nside,
+                     np.ndarray[np.float64_t, ndim=1] values,
+                     long ntheta, long nphi,
+                     np.ndarray[np.float64_t, ndim=2] irotation):
+    # We will first to pix2vec, rotate, then calculate the angle
+    cdef int i, j, thetai, phii
+    cdef long ipix
+    cdef double v0[3], v1[3]
+    cdef double pi = 3.1415926
+    cdef np.float64_t pi2 = pi/2.0
+    cdef np.float64_t phi, theta
+    cdef np.ndarray[np.float64_t, ndim=2] results
+    cdef np.ndarray[np.int32_t, ndim=2] count
+    results = np.zeros((ntheta, nphi), dtype="float64")
+    count = np.zeros((ntheta, nphi), dtype="int32")
+
+    cdef np.float64_t phi0 = 0
+    cdef np.float64_t dphi = 2.0 * pi/nphi
+
+    cdef np.float64_t theta0 = 0
+    cdef np.float64_t dtheta = pi/ntheta
+    # We assume these are the rotated theta and phi
+    for thetai in range(ntheta):
+        theta = theta0 + dtheta * thetai
+        for phii in range(nphi):
+            phi = phi0 + dphi * phii
+            # We have our rotated vector
+            v1[0] = cos(phi) * sin(theta)
+            v1[1] = sin(phi) * sin(theta)
+            v1[2] = cos(theta)
+            # Now we rotate back
+            for i in range(3):
+                v0[i] = 0
+                for j in range(3):
+                    v0[i] += v1[j] * irotation[j,i]
+            # Get the pixel this vector is inside
+            healpix_interface.vec2pix_nest(nside, v0, &ipix)
+            results[thetai, phii] = values[ipix]
+            count[i, j] += 1
+    return results, count
+    for i in range(ntheta):
+        for j in range(nphi):
+            if count[i,j] > 0:
+                results[i,j] /= count[i,j]
+    return results, count


diff -r 5604b4fc94e069a4c36b140e9fb9e680172bd6f4 -r f43257a218f3b1077231819faad188b7371a6048 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -43,7 +43,8 @@
 
 from yt.utilities.amr_utils import \
     PartitionedGrid, ProjectionSampler, VolumeRenderSampler, \
-    arr_vec2pix_nest, arr_pix2vec_nest, arr_ang2pix_nest
+    arr_vec2pix_nest, arr_pix2vec_nest, arr_ang2pix_nest, \
+    pixelize_healpix
 
 class Camera(ParallelAnalysisInterface):
     def __init__(self, center, normal_vector, width,
@@ -954,8 +955,13 @@
     center = na.array(center, dtype='float64')
     if weight is not None:
         # This is a temporary field, which we will remove at the end.
+        def _make_wf(f, w):
+            def temp_weightfield(a, b):
+                tr = b[f].astype("float64") * b[w]
+                return tr
+            return temp_weightfield
         pf.field_info.add_field("temp_weightfield",
-            function=lambda a,b:b[field]*b[weight])
+            function=_make_wf(field, weight))
         fields = ["temp_weightfield", weight]
     nv = 12*nside**2
     image = na.zeros((nv,1,3), dtype='float64', order='C')
@@ -983,23 +989,27 @@
         dl = radius * pf.units[pf.field_info[field].projection_conversion]
         image *= dl
     else:
-        image /= vals[:,:,1]
+        image[:,:,0] /= image[:,:,1]
         pf.field_info.pop("temp_weightfield")
-    return image
+    return image[:,0,0]
 
-def plot_allsky_healpix(image, nside, fn, label = ""):
+def plot_allsky_healpix(image, nside, fn, label = "", rotation = None,
+                        take_log = True, resolution=512):
     import matplotlib.figure
     import matplotlib.backends.backend_agg
-    phi, theta = na.mgrid[0.0:2*pi:800j, 0:pi:800j]
-    pixi = arr_ang2pix_nest(nside, theta.ravel(), phi.ravel())
-    img = na.log10(image[:,0,0][pixi]).reshape((800,800))
+    if rotation is None: rotation = na.eye(3).astype("float64")
+
+    img, count = pixelize_healpix(nside, image, resolution, resolution, rotation)
 
     fig = matplotlib.figure.Figure((10, 5))
     ax = fig.add_subplot(1,1,1,projection='mollweide')
-    implot = ax.imshow(img, extent=(-pi,pi,-pi/2,pi/2), clip_on=False, aspect=0.5)
+    if take_log: func = na.log10
+    else: func = lambda a: a
+    implot = ax.imshow(func(img), extent=(-pi,pi,-pi/2,pi/2), clip_on=False, aspect=0.5)
     cb = fig.colorbar(implot, orientation='horizontal')
     cb.set_label(label)
     ax.xaxis.set_ticks(())
     ax.yaxis.set_ticks(())
     canvas = matplotlib.backends.backend_agg.FigureCanvasAgg(fig)
     canvas.print_figure(fn)
+    return img, count
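
For each output pixel, pixelize_healpix converts the (theta, phi) of the regular latitude/longitude grid into a unit vector, rotates it back through the inverse rotation matrix, and reads off the HEALpix pixel that vector falls in.  A NumPy sketch of that inner step; vec2pix_nest is passed in as a stand-in for the HEALpix routine the Cython code calls:

    import numpy as np

    def sample_rotated(values, nside, theta, phi, irotation, vec2pix_nest):
        v1 = np.array([np.cos(phi) * np.sin(theta),
                       np.sin(phi) * np.sin(theta),
                       np.cos(theta)])
        v0 = irotation.T @ v1           # v0[i] = sum_j v1[j] * irotation[j, i]
        ipix = vec2pix_nest(nside, v0)  # nested-scheme pixel lookup
        return values[ipix]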



https://bitbucket.org/yt_analysis/yt/changeset/ef8f2474aea1/
changeset:   ef8f2474aea1
branch:      yt
user:        MatthewTurk
date:        2011-12-27 15:48:00
summary:     Adding a HEALpix->Aitoff function.
affected #:  2 files

diff -r f43257a218f3b1077231819faad188b7371a6048 -r ef8f2474aea12bb81afde7a3cfb0b6c2aee7095c yt/utilities/_amr_utils/grid_traversal.pyx
--- a/yt/utilities/_amr_utils/grid_traversal.pyx
+++ b/yt/utilities/_amr_utils/grid_traversal.pyx
@@ -46,10 +46,13 @@
     double log2(double x) nogil
     long int lrint(double x) nogil
     double fabs(double x) nogil
+    double atan(double x) nogil
     double atan2(double y, double x) nogil
     double acos(double x) nogil
+    double asin(double x) nogil
     double cos(double x) nogil
     double sin(double x) nogil
+    double sqrt(double x) nogil
 
 cdef struct VolumeContainer
 ctypedef void sample_function(
@@ -841,9 +844,9 @@
         tr[i] = ipnest
     return tr
 
-#@cython.boundscheck(False)
+@cython.boundscheck(False)
 @cython.cdivision(False)
-#@cython.wraparound(False)
+@cython.wraparound(False)
 def pixelize_healpix(long nside,
                      np.ndarray[np.float64_t, ndim=1] values,
                      long ntheta, long nphi,
@@ -861,10 +864,10 @@
     count = np.zeros((ntheta, nphi), dtype="int32")
 
     cdef np.float64_t phi0 = 0
-    cdef np.float64_t dphi = 2.0 * pi/nphi
+    cdef np.float64_t dphi = 2.0 * pi/(nphi-1)
 
     cdef np.float64_t theta0 = 0
-    cdef np.float64_t dtheta = pi/ntheta
+    cdef np.float64_t dtheta = pi/(ntheta-1)
     # We assume these are the rotated theta and phi
     for thetai in range(ntheta):
         theta = theta0 + dtheta * thetai
@@ -889,3 +892,30 @@
             if count[i,j] > 0:
                 results[i,j] /= count[i,j]
     return results, count
+
+def healpix_aitoff_proj(np.ndarray[np.float64_t, ndim=1] pix_image,
+                        long nside,
+                        np.ndarray[np.float64_t, ndim=2] image):
+    cdef double pi = np.pi
+    cdef int i, j
+    cdef np.float64_t x, y, z, zb
+    cdef np.float64_t dx, dy, inside
+    dx = 2.0 / (image.shape[1] - 1)
+    dy = 2.0 / (image.shape[0] - 1)
+    cdef np.float64_t s2 = sqrt(2.0)
+    cdef long ipix
+    for i in range(image.shape[1]):
+        x = (-1.0 + i*dx)*s2*2.0
+        for j in range(image.shape[0]):
+            y = (-1.0 + j * dy)*s2
+            zb = (x*x/8.0 + y*y/2.0 - 1.0)
+            if zb > 0: continue
+            z = (1.0 - (x/4.0)**2.0 - (y/2.0)**2.0)
+            z = z**0.5
+            # Longitude
+            phi = 2.0*atan(z*x/(2.0 * (2.0*z*z-1.0))) + pi
+            # Latitude
+            # We shift it into co-latitude
+            theta = asin(z*y) + pi/2.0
+            healpix_interface.ang2pix_nest(nside, theta, phi, &ipix)
+            image[j, i] = pix_image[ipix]


diff -r f43257a218f3b1077231819faad188b7371a6048 -r ef8f2474aea12bb81afde7a3cfb0b6c2aee7095c yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -711,7 +711,7 @@
             img = na.log10(image[:,0,0][pixi]).reshape((800,800))
 
             fig = matplotlib.figure.Figure((10, 5))
-            ax = fig.add_subplot(1,1,1,projection='mollweide')
+            ax = fig.add_subplot(1,1,1,projection='hammer')
             implot = ax.imshow(img, extent=(-pi,pi,-pi/2,pi/2), clip_on=False, aspect=0.5)
             cb = fig.colorbar(implot, orientation='horizontal')
             cb.set_label(r"$\mathrm{Column}\/\mathrm{Density}\/[\mathrm{g}/\mathrm{cm}^2]$")



https://bitbucket.org/yt_analysis/yt/changeset/0e2f99efb171/
changeset:   0e2f99efb171
branch:      yt
user:        MatthewTurk
date:        2011-12-27 17:43:47
summary:     Provide the option to rotate vectors inside the healpix_aitoff_proj function.
affected #:  1 file

diff -r ef8f2474aea12bb81afde7a3cfb0b6c2aee7095c -r 0e2f99efb171bc3bf79731bf6f1ee0eb955775fc yt/utilities/_amr_utils/grid_traversal.pyx
--- a/yt/utilities/_amr_utils/grid_traversal.pyx
+++ b/yt/utilities/_amr_utils/grid_traversal.pyx
@@ -895,11 +895,13 @@
 
 def healpix_aitoff_proj(np.ndarray[np.float64_t, ndim=1] pix_image,
                         long nside,
-                        np.ndarray[np.float64_t, ndim=2] image):
+                        np.ndarray[np.float64_t, ndim=2] image,
+                        np.ndarray[np.float64_t, ndim=2] irotation):
     cdef double pi = np.pi
-    cdef int i, j
+    cdef int i, j, k, l
     cdef np.float64_t x, y, z, zb
     cdef np.float64_t dx, dy, inside
+    cdef double v0[3], v1[3]
     dx = 2.0 / (image.shape[1] - 1)
     dy = 2.0 / (image.shape[0] - 1)
     cdef np.float64_t s2 = sqrt(2.0)
@@ -913,9 +915,18 @@
             z = (1.0 - (x/4.0)**2.0 - (y/2.0)**2.0)
             z = z**0.5
             # Longitude
-            phi = 2.0*atan(z*x/(2.0 * (2.0*z*z-1.0))) + pi
+            phi = (2.0*atan(z*x/(2.0 * (2.0*z*z-1.0))) + pi)
             # Latitude
             # We shift it into co-latitude
-            theta = asin(z*y) + pi/2.0
-            healpix_interface.ang2pix_nest(nside, theta, phi, &ipix)
+            theta = (asin(z*y) + pi/2.0)
+            # Now to account for rotation we translate into vectors
+            v1[0] = cos(phi) * sin(theta)
+            v1[1] = sin(phi) * sin(theta)
+            v1[2] = cos(theta)
+            for k in range(3):
+                v0[k] = 0
+                for l in range(3):
+                    v0[k] += v1[l] * irotation[l,k]
+            healpix_interface.vec2pix_nest(nside, v0, &ipix)
+            #print "Rotated", v0[0], v0[1], v0[2], v1[0], v1[1], v1[2], ipix, pix_image[ipix]
             image[j, i] = pix_image[ipix]



https://bitbucket.org/yt_analysis/yt/changeset/5b44930c2d86/
changeset:   5b44930c2d86
branch:      yt
user:        MatthewTurk
date:        2012-01-04 18:37:58
summary:     Updating walk_volume to have clearer grid intersection calculations.  Still
getting artifacts at points where vectors switch signs.
affected #:  2 files

diff -r 0e2f99efb171bc3bf79731bf6f1ee0eb955775fc -r 5b44930c2d866b8f171808d5624242300cd140fa yt/utilities/_amr_utils/grid_traversal.pyx
--- a/yt/utilities/_amr_utils/grid_traversal.pyx
+++ b/yt/utilities/_amr_utils/grid_traversal.pyx
@@ -655,14 +655,17 @@
     cdef int cur_ind[3], step[3], x, y, i, n, flat_ind, hit, direction
     cdef np.float64_t intersect_t = 1.0
     cdef np.float64_t iv_dir[3]
-    cdef np.float64_t intersect[3], tmax[3], tdelta[3]
+    cdef np.float64_t tmax[3], tdelta[3]
     cdef np.float64_t dist, alpha, dt, exit_t
     cdef np.float64_t tr, tl, temp_x, temp_y, dv
     for i in range(3):
+        iv_dir[i] = 1.0/v_dir[i]
+        tdelta[i] = iv_dir[i] * vc.dds[i]
+        if tdelta[i] < 0: tdelta[i] *= -1
         if (v_dir[i] < 0):
             step[i] = -1
         elif (v_dir[i] == 0):
-            step[i] = 1
+            step[i] = 0
             tmax[i] = 1e60
             iv_dir[i] = 1e60
             tdelta[i] = 1e-60
@@ -671,50 +674,52 @@
             step[i] = 1
         x = (i+1) % 3
         y = (i+2) % 3
-        iv_dir[i] = 1.0/v_dir[i]
-        tl = (vc.left_edge[i] - v_pos[i])*iv_dir[i]
-        temp_x = (v_pos[x] + tl*v_dir[x])
-        temp_y = (v_pos[y] + tl*v_dir[y])
-        if vc.left_edge[x] <= temp_x and temp_x <= vc.right_edge[x] and \
-           vc.left_edge[y] <= temp_y and temp_y <= vc.right_edge[y] and \
-           0.0 <= tl and tl < intersect_t:
-            direction = i
-            intersect_t = tl
-        tr = (vc.right_edge[i] - v_pos[i])*iv_dir[i]
-        temp_x = (v_pos[x] + tr*v_dir[x])
-        temp_y = (v_pos[y] + tr*v_dir[y])
-        if vc.left_edge[x] <= temp_x and temp_x <= vc.right_edge[x] and \
-           vc.left_edge[y] <= temp_y and temp_y <= vc.right_edge[y] and \
-           0.0 <= tr and tr < intersect_t:
-            direction = i
-            intersect_t = tr
+        if step[i] > 0:
+            tl = (vc.left_edge[i] - v_pos[i])*iv_dir[i]
+            temp_x = (v_pos[x] + tl*v_dir[x])
+            temp_y = (v_pos[y] + tl*v_dir[y])
+            if vc.left_edge[x] <= temp_x and temp_x <= vc.right_edge[x] and \
+               vc.left_edge[y] <= temp_y and temp_y <= vc.right_edge[y] and \
+               0.0 <= tl and tl < intersect_t:
+                direction = i
+                intersect_t = tl
+        elif step[i] < 0:
+            tr = (vc.right_edge[i] - v_pos[i])*iv_dir[i]
+            temp_x = (v_pos[x] + tr*v_dir[x])
+            temp_y = (v_pos[y] + tr*v_dir[y])
+            if vc.left_edge[x] <= temp_x and temp_x <= vc.right_edge[x] and \
+               vc.left_edge[y] <= temp_y and temp_y <= vc.right_edge[y] and \
+               0.0 <= tr and tr < intersect_t:
+                direction = i
+                intersect_t = tr
     if vc.left_edge[0] <= v_pos[0] and v_pos[0] <= vc.right_edge[0] and \
        vc.left_edge[1] <= v_pos[1] and v_pos[1] <= vc.right_edge[1] and \
        vc.left_edge[2] <= v_pos[2] and v_pos[2] <= vc.right_edge[2]:
         intersect_t = 0.0
+        direction = 3
     if enter_t >= 0.0: intersect_t = enter_t
     if not ((0.0 <= intersect_t) and (intersect_t < 1.0)): return 0
     for i in range(3):
-        intersect[i] = v_pos[i] + intersect_t * v_dir[i]
-        cur_ind[i] = <int> floor((intersect[i] +
-                                  step[i]*1e-8*vc.dds[i] -
-                                  vc.left_edge[i])*vc.idds[i])
-        tmax[i] = (((cur_ind[i]+step[i])*vc.dds[i])+
-                    vc.left_edge[i]-v_pos[i])*iv_dir[i]
-        # This deals with the asymmetry in having our indices refer to the
-        # left edge of a cell, but the right edge of the brick being one
-        # extra zone out.
-        if cur_ind[i] == vc.dims[i] and step[i] < 0:
+        # Two things have to be set inside this loop.
+        # cur_ind[i], the current index of the grid cell the ray is in
+        # tmax[i], the 't' until it crosses out of the grid cell
+        if i == direction and step[i] > 0:
+            # Intersection with the left face in this direction
+            cur_ind[i] = 0
+        elif i == direction and step[i] < 0:
+            # Intersection with the right face in this direction
             cur_ind[i] = vc.dims[i] - 1
-        if cur_ind[i] < 0 or cur_ind[i] >= vc.dims[i]: return 0
+        else:
+            # We are somewhere in the middle
+            temp_x = intersect_t * v_dir[i] + v_pos[i] # current position
+            cur_ind[i] = <int> floor((temp_x - vc.left_edge[i])*vc.idds[i])
         if step[i] > 0:
-            tmax[i] = (((cur_ind[i]+1)*vc.dds[i])
-                        +vc.left_edge[i]-v_pos[i])*iv_dir[i]
-        if step[i] < 0:
-            tmax[i] = (((cur_ind[i]+0)*vc.dds[i])
-                        +vc.left_edge[i]-v_pos[i])*iv_dir[i]
-        tdelta[i] = (vc.dds[i]*iv_dir[i])
-        if tdelta[i] < 0: tdelta[i] *= -1
+            temp_y = (cur_ind[i] + 1) * vc.dds[i] + vc.left_edge[i]
+        elif step[i] < 0:
+            temp_y = cur_ind[i] * vc.dds[i] + vc.left_edge[i]
+        tmax[i] = (temp_y - v_pos[i]) * iv_dir[i]
+        if step[i] == 0:
+            tmax[i] = 1e60
     # We have to jumpstart our calculation
     enter_t = intersect_t
     hit = 0


diff -r 0e2f99efb171bc3bf79731bf6f1ee0eb955775fc -r 5b44930c2d866b8f171808d5624242300cd140fa yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -908,7 +908,8 @@
         pf.field_info.pop("temp_weightfield")
     return image[:,:,0]
 
-def allsky_projection(pf, center, radius, nside, field, weight = None):
+def allsky_projection(pf, center, radius, nside, field, weight = None,
+                      rotation = None):
     r"""Project through a parameter file, through an allsky-method
     decomposition from HEALpix, and return the image plane.
 
@@ -968,14 +969,18 @@
     vs = arr_pix2vec_nest(nside, na.arange(nv))
     vs *= radius
     vs.shape = (nv,1,3)
+    if rotation is not None:
+        vs2 = vs.copy()
+        for i in range(3):
+            vs[:,:,i] = (vs2 * rotation[:,i]).sum(axis=2)
+    positions = na.ones((nv, 1, 3), dtype='float64', order='C') * center
     uv = na.ones(3, dtype='float64')
-    positions = na.ones((nv, 1, 3), dtype='float64') * center
     grids = pf.h.sphere(center, radius)._grids
     sampler = ProjectionSampler(positions, vs, center, (0.0, 0.0, 0.0, 0.0),
                                 image, uv, uv, na.zeros(3, dtype='float64'))
     pb = get_pbar("Sampling ", len(grids))
     for i,grid in enumerate(grids):
-        data = [(grid[field] * grid.child_mask).astype("float64")
+        data = [grid[field] * grid.child_mask.astype('float64')
                 for field in fields]
         pg = PartitionedGrid(
             grid.id, data,
@@ -1002,7 +1007,7 @@
     img, count = pixelize_healpix(nside, image, resolution, resolution, rotation)
 
     fig = matplotlib.figure.Figure((10, 5))
-    ax = fig.add_subplot(1,1,1,projection='mollweide')
+    ax = fig.add_subplot(1,1,1,projection='aitoff')
     if take_log: func = na.log10
     else: func = lambda a: a
     implot = ax.imshow(func(img), extent=(-pi,pi,-pi/2,pi/2), clip_on=False, aspect=0.5)
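
The reworked intersection test above only checks the face a ray can actually enter along each axis (the left face when stepping in the positive direction, the right face when stepping in the negative direction) and requires the hit point to fall inside the brick in the other two dimensions.  A plain-Python sketch of that per-axis test; the caller keeps the smallest returned t as the entry parameter:

    def entry_t(axis, v_pos, v_dir, left_edge, right_edge, step):
        if step[axis] == 0:
            return None                 # ray is parallel to this axis
        x, y = (axis + 1) % 3, (axis + 2) % 3
        face = left_edge[axis] if step[axis] > 0 else right_edge[axis]
        t = (face - v_pos[axis]) / v_dir[axis]
        px = v_pos[x] + t * v_dir[x]
        py = v_pos[y] + t * v_dir[y]
        if (left_edge[x] <= px <= right_edge[x] and
                left_edge[y] <= py <= right_edge[y] and 0.0 <= t < 1.0):
            return t
        return None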



https://bitbucket.org/yt_analysis/yt/changeset/3857ced70f27/
changeset:   3857ced70f27
branch:      yt
user:        MatthewTurk
date:        2012-01-06 13:01:15
summary:     Intermediate commit, which strips out prange and nogil to allow printing of debugging information.  Also includes substantial refactoring of rough areas in walk_volume.
affected #:  2 files

diff -r 5b44930c2d866b8f171808d5624242300cd140fa -r 3857ced70f27652d4e479d8231d4a49f2dec1220 yt/utilities/_amr_utils/grid_traversal.pyx
--- a/yt/utilities/_amr_utils/grid_traversal.pyx
+++ b/yt/utilities/_amr_utils/grid_traversal.pyx
@@ -45,6 +45,7 @@
     double fmod(double x, double y) nogil
     double log2(double x) nogil
     long int lrint(double x) nogil
+    double nearbyint(double x) nogil
     double fabs(double x) nogil
     double atan(double x) nogil
     double atan2(double y, double x) nogil
@@ -54,16 +55,6 @@
     double sin(double x) nogil
     double sqrt(double x) nogil
 
-cdef struct VolumeContainer
-ctypedef void sample_function(
-                VolumeContainer *vc,
-                np.float64_t v_pos[3],
-                np.float64_t v_dir[3],
-                np.float64_t enter_t,
-                np.float64_t exit_t,
-                int index[3],
-                void *data) nogil
-
 cdef struct VolumeContainer:
     int n_fields
     np.float64_t **data
@@ -73,6 +64,15 @@
     np.float64_t idds[3]
     int dims[3]
 
+ctypedef void sample_function(
+                VolumeContainer *vc,
+                np.float64_t v_pos[3],
+                np.float64_t v_dir[3],
+                np.float64_t enter_t,
+                np.float64_t exit_t,
+                int index[3],
+                void *data) nogil
+
 cdef class PartitionedGrid:
     cdef public object my_data
     cdef public object LeftEdge
@@ -82,6 +82,7 @@
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
+    @cython.cdivision(True)
     def __cinit__(self,
                   int parent_grid_id, data,
                   np.ndarray[np.float64_t, ndim=1] left_edge,
@@ -102,7 +103,7 @@
             c.right_edge[i] = right_edge[i]
             c.dims[i] = dims[i]
             c.dds[i] = (c.right_edge[i] - c.left_edge[i])/dims[i]
-            c.idds[i] = 1.0/c.dds[i]
+            c.idds[i] = c.dds[i]**-1.0
         self.my_data = data
         c.data = <np.float64_t **> malloc(sizeof(np.float64_t*) * n_fields)
         for i in range(n_fields):
@@ -258,41 +259,40 @@
         for i in range(3):
             width[i] = self.width[i]
         #print iter[0], iter[1], iter[2], iter[3], width[0], width[1], width[2]
-        with nogil, parallel():
-            idata = <ImageAccumulator *> malloc(sizeof(ImageAccumulator))
-            idata.supp_data = self.supp_data
-            v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-            v_dir = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-            if im.vd_strides[0] == -1:
-                for j in prange(size, schedule="dynamic"):
-                    vj = j % ny
-                    vi = (j - vj) / ny + iter[0]
-                    vj = vj + iter[2]
-                    # Dynamically calculate the position
-                    px = width[0] * (<np.float64_t>vi)/(<np.float64_t>im.nv[0]-1) - width[0]/2.0
-                    py = width[1] * (<np.float64_t>vj)/(<np.float64_t>im.nv[1]-1) - width[1]/2.0
-                    v_pos[0] = im.vp_pos[0]*px + im.vp_pos[3]*py + im.vp_pos[9]
-                    v_pos[1] = im.vp_pos[1]*px + im.vp_pos[4]*py + im.vp_pos[10]
-                    v_pos[2] = im.vp_pos[2]*px + im.vp_pos[5]*py + im.vp_pos[11]
-                    offset = im.im_strides[0] * vi + im.im_strides[1] * vj
-                    for i in range(3): idata.rgba[i] = im.image[i + offset]
-                    walk_volume(vc, v_pos, im.vp_dir, self.sampler,
-                                (<void *> idata))
-                    for i in range(3): im.image[i + offset] = idata.rgba[i]
-            else:
-                # If we do not have a simple image plane, we have to cast all
-                # our rays 
-                for j in prange(size, schedule="dynamic"):
-                    offset = j * 3
-                    for i in range(3): v_pos[i] = im.vp_pos[i + offset]
-                    for i in range(3): v_dir[i] = im.vp_dir[i + offset]
-                    for i in range(3): idata.rgba[i] = im.image[i + offset]
-                    walk_volume(vc, v_pos, v_dir, self.sampler, 
-                                (<void *> idata))
-                    for i in range(3): im.image[i + offset] = idata.rgba[i]
-            free(v_dir)
-            free(idata)
-            free(v_pos)
+        idata = <ImageAccumulator *> malloc(sizeof(ImageAccumulator))
+        idata.supp_data = self.supp_data
+        v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        v_dir = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        if im.vd_strides[0] == -1:
+            for j in range(size):
+                vj = j % ny
+                vi = (j - vj) / ny + iter[0]
+                vj = vj + iter[2]
+                # Dynamically calculate the position
+                px = width[0] * (<np.float64_t>vi)/(<np.float64_t>im.nv[0]-1) - width[0]/2.0
+                py = width[1] * (<np.float64_t>vj)/(<np.float64_t>im.nv[1]-1) - width[1]/2.0
+                v_pos[0] = im.vp_pos[0]*px + im.vp_pos[3]*py + im.vp_pos[9]
+                v_pos[1] = im.vp_pos[1]*px + im.vp_pos[4]*py + im.vp_pos[10]
+                v_pos[2] = im.vp_pos[2]*px + im.vp_pos[5]*py + im.vp_pos[11]
+                offset = im.im_strides[0] * vi + im.im_strides[1] * vj
+                for i in range(3): idata.rgba[i] = im.image[i + offset]
+                walk_volume(vc, v_pos, im.vp_dir, self.sampler,
+                            (<void *> idata))
+                for i in range(3): im.image[i + offset] = idata.rgba[i]
+        else:
+            # If we do not have a simple image plane, we have to cast all
+            # our rays 
+            for j in range(size):
+                offset = j * 3
+                for i in range(3): v_pos[i] = im.vp_pos[i + offset]
+                for i in range(3): v_dir[i] = im.vp_dir[i + offset]
+                for i in range(3): idata.rgba[i] = im.image[i + offset]
+                walk_volume(vc, v_pos, v_dir, self.sampler, 
+                            (<void *> idata))
+                for i in range(3):  im.image[i + offset] = idata.rgba[i]
+        free(v_dir)
+        free(idata)
+        free(v_pos)
         #print self.aimage.max()
         return hit
 
@@ -307,10 +307,7 @@
     cdef ImageAccumulator *im = <ImageAccumulator *> data
     cdef int i
     cdef np.float64_t dl = (exit_t - enter_t)
-    # We need this because by default it assumes vertex-centered data.
-    for i in range(3):
-        if index[i] < 0 or index[i] >= vc.dims[i]: return
-    cdef int di = (index[0]*(vc.dims[1])+index[1])*vc.dims[2]+index[2]
+    cdef int di = (index[0]*vc.dims[1]+index[1])*vc.dims[2]+index[2]
     for i in range(imin(3, vc.n_fields)):
         im.rgba[i] += vc.data[i][di] * dl
 
@@ -651,58 +648,50 @@
                      sample_function *sampler,
                      void *data,
                      np.float64_t *return_t = NULL,
-                     np.float64_t enter_t = -1.0) nogil:
+                     np.float64_t enter_t = -1.0):
     cdef int cur_ind[3], step[3], x, y, i, n, flat_ind, hit, direction
-    cdef np.float64_t intersect_t = 1.0
+    cdef np.float64_t intersect_t = 1.1
     cdef np.float64_t iv_dir[3]
     cdef np.float64_t tmax[3], tdelta[3]
     cdef np.float64_t dist, alpha, dt, exit_t
     cdef np.float64_t tr, tl, temp_x, temp_y, dv
-    for i in range(3):
-        iv_dir[i] = 1.0/v_dir[i]
-        tdelta[i] = iv_dir[i] * vc.dds[i]
-        if tdelta[i] < 0: tdelta[i] *= -1
-        if (v_dir[i] < 0):
-            step[i] = -1
-        elif (v_dir[i] == 0):
-            step[i] = 0
-            tmax[i] = 1e60
-            iv_dir[i] = 1e60
-            tdelta[i] = 1e-60
-            continue
-        else:
-            step[i] = 1
-        x = (i+1) % 3
-        y = (i+2) % 3
-        if step[i] > 0:
-            tl = (vc.left_edge[i] - v_pos[i])*iv_dir[i]
-            temp_x = (v_pos[x] + tl*v_dir[x])
-            temp_y = (v_pos[y] + tl*v_dir[y])
-            if vc.left_edge[x] <= temp_x and temp_x <= vc.right_edge[x] and \
-               vc.left_edge[y] <= temp_y and temp_y <= vc.right_edge[y] and \
-               0.0 <= tl and tl < intersect_t:
-                direction = i
-                intersect_t = tl
-        elif step[i] < 0:
-            tr = (vc.right_edge[i] - v_pos[i])*iv_dir[i]
-            temp_x = (v_pos[x] + tr*v_dir[x])
-            temp_y = (v_pos[y] + tr*v_dir[y])
-            if vc.left_edge[x] <= temp_x and temp_x <= vc.right_edge[x] and \
-               vc.left_edge[y] <= temp_y and temp_y <= vc.right_edge[y] and \
-               0.0 <= tr and tr < intersect_t:
-                direction = i
-                intersect_t = tr
+    direction = -1
     if vc.left_edge[0] <= v_pos[0] and v_pos[0] <= vc.right_edge[0] and \
        vc.left_edge[1] <= v_pos[1] and v_pos[1] <= vc.right_edge[1] and \
        vc.left_edge[2] <= v_pos[2] and v_pos[2] <= vc.right_edge[2]:
         intersect_t = 0.0
         direction = 3
+    for i in range(3):
+        if (v_dir[i] < 0):
+            step[i] = -1
+        elif (v_dir[i] == 0.0):
+            step[i] = 0
+            continue
+        else:
+            step[i] = 1
+        iv_dir[i] = 1.0/v_dir[i]
+        if direction == 3: continue
+        x = (i+1) % 3
+        y = (i+2) % 3
+        if step[i] > 0:
+            tl = (vc.left_edge[i] - v_pos[i])*iv_dir[i]
+        else:
+            tl = (vc.right_edge[i] - v_pos[i])*iv_dir[i]
+        temp_x = (v_pos[x] + tl*v_dir[x])
+        temp_y = (v_pos[y] + tl*v_dir[y])
+        if vc.left_edge[x] <= temp_x and temp_x <= vc.right_edge[x] and \
+           vc.left_edge[y] <= temp_y and temp_y <= vc.right_edge[y] and \
+           0.0 <= tl and tl < intersect_t:
+            direction = i
+            intersect_t = tl
     if enter_t >= 0.0: intersect_t = enter_t
-    if not ((0.0 <= intersect_t) and (intersect_t < 1.0)): return 0
+    if not ((0.0 <= intersect_t) and (intersect_t < 1.0)):
+        return 0
     for i in range(3):
         # Two things have to be set inside this loop.
         # cur_ind[i], the current index of the grid cell the ray is in
         # tmax[i], the 't' until it crosses out of the grid cell
+        tdelta[i] = step[i] * iv_dir[i] * vc.dds[i]
         if i == direction and step[i] > 0:
             # Intersection with the left face in this direction
             cur_ind[i] = 0
@@ -710,9 +699,10 @@
             # Intersection with the right face in this direction
             cur_ind[i] = vc.dims[i] - 1
         else:
-            # We are somewhere in the middle
+            # We are somewhere in the middle of the face
             temp_x = intersect_t * v_dir[i] + v_pos[i] # current position
-            cur_ind[i] = <int> floor((temp_x - vc.left_edge[i])*vc.idds[i])
+            temp_y = ((temp_x - vc.left_edge[i])*vc.idds[i])
+            cur_ind[i] =  <int> (floor(temp_y))
         if step[i] > 0:
             temp_y = (cur_ind[i] + 1) * vc.dds[i] + vc.left_edge[i]
         elif step[i] < 0:
@@ -724,40 +714,25 @@
     enter_t = intersect_t
     hit = 0
     while 1:
-        # dims here is one less than the dimensions of the data,
-        # but we are tracing on the grid, not on the data...
-        if (not (0 <= cur_ind[0] < vc.dims[0])) or \
-           (not (0 <= cur_ind[1] < vc.dims[1])) or \
-           (not (0 <= cur_ind[2] < vc.dims[2])):
-            break
         hit += 1
         if tmax[0] < tmax[1]:
             if tmax[0] < tmax[2]:
-                exit_t = fmin(tmax[0], 1.0)
-                sampler(vc, v_pos, v_dir, enter_t, exit_t, cur_ind, data)
-                cur_ind[0] += step[0]
-                enter_t = tmax[0]
-                tmax[0] += tdelta[0]
+                i = 0
             else:
-                exit_t = fmin(tmax[2], 1.0)
-                sampler(vc, v_pos, v_dir, enter_t, exit_t, cur_ind, data)
-                cur_ind[2] += step[2]
-                enter_t = tmax[2]
-                tmax[2] += tdelta[2]
+                i = 2
         else:
             if tmax[1] < tmax[2]:
-                exit_t = fmin(tmax[1], 1.0)
-                sampler(vc, v_pos, v_dir, enter_t, exit_t, cur_ind, data)
-                cur_ind[1] += step[1]
-                enter_t = tmax[1]
-                tmax[1] += tdelta[1]
+                i = 1
             else:
-                exit_t = fmin(tmax[2], 1.0)
-                sampler(vc, v_pos, v_dir, enter_t, exit_t, cur_ind, data)
-                cur_ind[2] += step[2]
-                enter_t = tmax[2]
-                tmax[2] += tdelta[2]
-        if enter_t >= 1.0: break
+                i = 2
+        exit_t = fmin(tmax[i], 1.0)
+        assert((tmax[i] - enter_t) * v_dir[i] < 1.8 * vc.dds[i])
+        sampler(vc, v_pos, v_dir, enter_t, exit_t, cur_ind, data)
+        cur_ind[i] += step[i]
+        enter_t = tmax[i]
+        tmax[i] += tdelta[i]
+        if cur_ind[i] < 0 or cur_ind[i] >= vc.dims[i] or enter_t >= 1.0:
+            break
     if return_t != NULL: return_t[0] = exit_t
     return hit
 


diff -r 5b44930c2d866b8f171808d5624242300cd140fa -r 3857ced70f27652d4e479d8231d4a49f2dec1220 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -984,7 +984,8 @@
                 for field in fields]
         pg = PartitionedGrid(
             grid.id, data,
-            grid.LeftEdge, grid.RightEdge, grid.ActiveDimensions.astype("int64"))
+            grid.LeftEdge, grid.RightEdge,
+            grid.ActiveDimensions.astype("int64"))
         grid.clear_data()
         sampler(pg)
         pb.update(i)
@@ -996,7 +997,7 @@
     else:
         image[:,:,0] /= image[:,:,1]
         pf.field_info.pop("temp_weightfield")
-    return image[:,0,0]
+    return image[:,0,0], (vs, positions, image)
 
 def plot_allsky_healpix(image, nside, fn, label = "", rotation = None,
                         take_log = True, resolution=512):



https://bitbucket.org/yt_analysis/yt/changeset/a839348d1881/
changeset:   a839348d1881
branch:      yt
user:        MatthewTurk
date:        2012-01-06 14:03:29
summary:     Minor formatting changes.  Removed assert.
affected #:  1 file

diff -r 3857ced70f27652d4e479d8231d4a49f2dec1220 -r a839348d1881b8bbe90667ab101d3e1fca58dba6 yt/utilities/_amr_utils/grid_traversal.pyx
--- a/yt/utilities/_amr_utils/grid_traversal.pyx
+++ b/yt/utilities/_amr_utils/grid_traversal.pyx
@@ -103,7 +103,7 @@
             c.right_edge[i] = right_edge[i]
             c.dims[i] = dims[i]
             c.dds[i] = (c.right_edge[i] - c.left_edge[i])/dims[i]
-            c.idds[i] = c.dds[i]**-1.0
+            c.idds[i] = 1.0/c.dds[i]
         self.my_data = data
         c.data = <np.float64_t **> malloc(sizeof(np.float64_t*) * n_fields)
         for i in range(n_fields):
@@ -685,8 +685,7 @@
             direction = i
             intersect_t = tl
     if enter_t >= 0.0: intersect_t = enter_t
-    if not ((0.0 <= intersect_t) and (intersect_t < 1.0)):
-        return 0
+    if not ((0.0 <= intersect_t) and (intersect_t < 1.0)): return 0
     for i in range(3):
         # Two things have to be set inside this loop.
         # cur_ind[i], the current index of the grid cell the ray is in
@@ -726,7 +725,6 @@
             else:
                 i = 2
         exit_t = fmin(tmax[i], 1.0)
-        assert((tmax[i] - enter_t) * v_dir[i] < 1.8 * vc.dds[i])
         sampler(vc, v_pos, v_dir, enter_t, exit_t, cur_ind, data)
         cur_ind[i] += step[i]
         enter_t = tmax[i]



https://bitbucket.org/yt_analysis/yt/changeset/aacd926ce929/
changeset:   aacd926ce929
branch:      yt
user:        MatthewTurk
date:        2012-01-06 17:49:39
summary:     Adding a random offset to avoid some edge effects.
affected #:  1 file

diff -r a839348d1881b8bbe90667ab101d3e1fca58dba6 -r aacd926ce92917e98b171fc61e66585cffd9a700 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -967,6 +967,7 @@
     nv = 12*nside**2
     image = na.zeros((nv,1,3), dtype='float64', order='C')
     vs = arr_pix2vec_nest(nside, na.arange(nv))
+    vs += na.random.random(vs.shape)*1e-10 - 0.5e-10
     vs *= radius
     vs.shape = (nv,1,3)
     if rotation is not None:
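
The offset added above is symmetric: na.random.random is uniform on [0, 1), so scaling by 1e-10 and subtracting 0.5e-10 perturbs each ray endpoint by at most 5e-11 in either direction, just enough to move it off an exact cell boundary.  A quick NumPy check of that range:

    import numpy as np

    jitter = np.random.random((12 * 32 ** 2, 3)) * 1e-10 - 0.5e-10
    assert np.abs(jitter).max() <= 5e-11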



https://bitbucket.org/yt_analysis/yt/changeset/2477a2f83114/
changeset:   2477a2f83114
branch:      yt
user:        MatthewTurk
date:        2012-01-06 18:28:50
summary:     Adding a quick fix for starting on cell boundaries.
affected #:  1 file

diff -r aacd926ce92917e98b171fc61e66585cffd9a700 -r 2477a2f831147992fbe47aaab8e82ef3c9b8c0bd yt/utilities/_amr_utils/grid_traversal.pyx
--- a/yt/utilities/_amr_utils/grid_traversal.pyx
+++ b/yt/utilities/_amr_utils/grid_traversal.pyx
@@ -710,6 +710,11 @@
         if step[i] == 0:
             tmax[i] = 1e60
     # We have to jumpstart our calculation
+    for i in range(3):
+        if cur_ind[i] == vc.dims[i] and step[i] == 1:
+            return 0
+        if cur_ind[i] == -1 and step[i] == -1:
+            return 0
     enter_t = intersect_t
     hit = 0
     while 1:



https://bitbucket.org/yt_analysis/yt/changeset/6251ac233b88/
changeset:   6251ac233b88
branch:      yt
user:        MatthewTurk
date:        2012-01-10 16:13:48
summary:     Turning back on OpenMP
affected #:  1 file

diff -r 2477a2f831147992fbe47aaab8e82ef3c9b8c0bd -r 6251ac233b88bee5ddff33ecefb4da3b179eacce yt/utilities/_amr_utils/grid_traversal.pyx
--- a/yt/utilities/_amr_utils/grid_traversal.pyx
+++ b/yt/utilities/_amr_utils/grid_traversal.pyx
@@ -90,6 +90,7 @@
                   np.ndarray[np.int64_t, ndim=1] dims):
         # The data is likely brought in via a slice, so we copy it
         cdef np.ndarray[np.float64_t, ndim=3] tdata
+        self.container = NULL
         self.parent_grid_id = parent_grid_id
         self.LeftEdge = left_edge
         self.RightEdge = right_edge
@@ -113,7 +114,8 @@
     def __dealloc__(self):
         # The data fields are not owned by the container, they are owned by us!
         # So we don't need to deallocate them.
-        free(self.container.data)
+        if self.container == NULL: return
+        if self.container.data != NULL: free(self.container.data)
         free(self.container)
 
 cdef struct ImageContainer:
@@ -259,40 +261,41 @@
         for i in range(3):
             width[i] = self.width[i]
         #print iter[0], iter[1], iter[2], iter[3], width[0], width[1], width[2]
-        idata = <ImageAccumulator *> malloc(sizeof(ImageAccumulator))
-        idata.supp_data = self.supp_data
-        v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-        v_dir = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-        if im.vd_strides[0] == -1:
-            for j in range(size):
-                vj = j % ny
-                vi = (j - vj) / ny + iter[0]
-                vj = vj + iter[2]
-                # Dynamically calculate the position
-                px = width[0] * (<np.float64_t>vi)/(<np.float64_t>im.nv[0]-1) - width[0]/2.0
-                py = width[1] * (<np.float64_t>vj)/(<np.float64_t>im.nv[1]-1) - width[1]/2.0
-                v_pos[0] = im.vp_pos[0]*px + im.vp_pos[3]*py + im.vp_pos[9]
-                v_pos[1] = im.vp_pos[1]*px + im.vp_pos[4]*py + im.vp_pos[10]
-                v_pos[2] = im.vp_pos[2]*px + im.vp_pos[5]*py + im.vp_pos[11]
-                offset = im.im_strides[0] * vi + im.im_strides[1] * vj
-                for i in range(3): idata.rgba[i] = im.image[i + offset]
-                walk_volume(vc, v_pos, im.vp_dir, self.sampler,
-                            (<void *> idata))
-                for i in range(3): im.image[i + offset] = idata.rgba[i]
-        else:
-            # If we do not have a simple image plane, we have to cast all
-            # our rays 
-            for j in range(size):
-                offset = j * 3
-                for i in range(3): v_pos[i] = im.vp_pos[i + offset]
-                for i in range(3): v_dir[i] = im.vp_dir[i + offset]
-                for i in range(3): idata.rgba[i] = im.image[i + offset]
-                walk_volume(vc, v_pos, v_dir, self.sampler, 
-                            (<void *> idata))
-                for i in range(3):  im.image[i + offset] = idata.rgba[i]
-        free(v_dir)
-        free(idata)
-        free(v_pos)
+        with nogil, parallel():
+            idata = <ImageAccumulator *> malloc(sizeof(ImageAccumulator))
+            idata.supp_data = self.supp_data
+            v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+            v_dir = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+            if im.vd_strides[0] == -1:
+                for j in prange(size, schedule="dynamic"):
+                    vj = j % ny
+                    vi = (j - vj) / ny + iter[0]
+                    vj = vj + iter[2]
+                    # Dynamically calculate the position
+                    px = width[0] * (<np.float64_t>vi)/(<np.float64_t>im.nv[0]-1) - width[0]/2.0
+                    py = width[1] * (<np.float64_t>vj)/(<np.float64_t>im.nv[1]-1) - width[1]/2.0
+                    v_pos[0] = im.vp_pos[0]*px + im.vp_pos[3]*py + im.vp_pos[9]
+                    v_pos[1] = im.vp_pos[1]*px + im.vp_pos[4]*py + im.vp_pos[10]
+                    v_pos[2] = im.vp_pos[2]*px + im.vp_pos[5]*py + im.vp_pos[11]
+                    offset = im.im_strides[0] * vi + im.im_strides[1] * vj
+                    for i in range(3): idata.rgba[i] = im.image[i + offset]
+                    walk_volume(vc, v_pos, im.vp_dir, self.sampler,
+                                (<void *> idata))
+                    for i in range(3): im.image[i + offset] = idata.rgba[i]
+            else:
+                # If we do not have a simple image plane, we have to cast all
+                # our rays 
+                for j in prange(size, schedule="dynamic"):
+                    offset = j * 3
+                    for i in range(3): v_pos[i] = im.vp_pos[i + offset]
+                    for i in range(3): v_dir[i] = im.vp_dir[i + offset]
+                    for i in range(3): idata.rgba[i] = im.image[i + offset]
+                    walk_volume(vc, v_pos, v_dir, self.sampler, 
+                                (<void *> idata))
+                    for i in range(3): im.image[i + offset] = idata.rgba[i]
+            free(v_dir)
+            free(idata)
+            free(v_pos)
         #print self.aimage.max()
         return hit
 
@@ -648,7 +651,7 @@
                      sample_function *sampler,
                      void *data,
                      np.float64_t *return_t = NULL,
-                     np.float64_t enter_t = -1.0):
+                     np.float64_t enter_t = -1.0) nogil:
     cdef int cur_ind[3], step[3], x, y, i, n, flat_ind, hit, direction
     cdef np.float64_t intersect_t = 1.1
     cdef np.float64_t iv_dir[3]



https://bitbucket.org/yt_analysis/yt/changeset/d0037d21f2e0/
changeset:   d0037d21f2e0
branch:      yt
user:        MatthewTurk
date:        2012-01-10 16:14:46
summary:     Merging from mainline of development.
affected #:  69 files

diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 setup.py
--- a/setup.py
+++ b/setup.py
@@ -76,7 +76,7 @@
 
 import setuptools
 
-VERSION = "2.3dev"
+VERSION = "2.4dev"
 
 if os.path.exists('MANIFEST'): os.remove('MANIFEST')
 


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -32,6 +32,7 @@
 import numpy as na
 import random
 import sys
+import os.path as path
 from collections import defaultdict
 
 from yt.funcs import *
@@ -1360,15 +1361,16 @@
         # The halos are listed in order in the file.
         lines = file("%s.txt" % self.basename)
         locations = []
+        realpath = path.realpath("%s.txt" % self.basename)
         for line in lines:
             line = line.split()
             # Prepend the hdf5 file names with the full path.
             temp = []
             for item in line[1:]:
-                if item[0] == "/":
-                    temp.append(item)
-                else:
-                    temp.append(self.pf.fullpath + '/' + item)
+                # This assumes that the .txt is in the same place as
+                # the h5 files, which is a reasonable assumption.
+                item = item.split("/")
+                temp.append(path.join(path.dirname(realpath), item[-1]))
             locations.append(temp)
         lines.close()
         return locations
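
The new path handling above no longer trusts whatever prefix was written into the .txt file: it resolves the real location of the text file and joins each HDF5 basename onto that directory.  A small sketch of the same logic with hypothetical file names:

    import os.path as path

    basename = "MergerHalos"                        # hypothetical
    realpath = path.realpath("%s.txt" % basename)
    item = "/some/old/prefix/MergerHalos_0000.h5"   # hypothetical entry from the .txt
    resolved = path.join(path.dirname(realpath), item.split("/")[-1])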


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -86,6 +86,9 @@
 "ChildHaloID3", "ChildHaloFrac3",
 "ChildHaloID4", "ChildHaloFrac4"]
 
+NumNeighbors = 15
+NumDB = 5
+
 class DatabaseFunctions(object):
     # Common database functions so it doesn't have to be repeated.
     def _open_database(self):
@@ -366,9 +369,9 @@
         child_points = na.array(child_points)
         fKD.pos = na.asfortranarray(child_points.T)
         fKD.qv = na.empty(3, dtype='float64')
-        fKD.dist = na.empty(5, dtype='float64')
-        fKD.tags = na.empty(5, dtype='int64')
-        fKD.nn = 5
+        fKD.dist = na.empty(NumNeighbors, dtype='float64')
+        fKD.tags = na.empty(NumNeighbors, dtype='int64')
+        fKD.nn = NumNeighbors
         fKD.sort = True
         fKD.rearrange = True
         create_tree(0)
@@ -395,7 +398,7 @@
                 nIDs.append(n)
             # We need to fill in fake halos if there aren't enough halos,
             # which can happen at high redshifts.
-            while len(nIDs) < 5:
+            while len(nIDs) < NumNeighbors:
                 nIDs.append(-1)
             candidates[row[0]] = nIDs
         
@@ -405,12 +408,12 @@
         self.candidates = candidates
         
         # This stores the masses contributed to each child candidate.
-        self.child_mass_arr = na.zeros(len(candidates)*5, dtype='float64')
+        self.child_mass_arr = na.zeros(len(candidates)*NumNeighbors, dtype='float64')
         # Records where to put the entries in the above array.
         self.child_mass_loc = defaultdict(dict)
         for i,halo in enumerate(sorted(candidates)):
             for j, child in enumerate(candidates[halo]):
-                self.child_mass_loc[halo][child] = i*5 + j
+                self.child_mass_loc[halo][child] = i*NumNeighbors + j
 
     def _build_h5_refs(self, filename):
         # For this snapshot, add lists of file names that contain the
@@ -618,8 +621,8 @@
         result = self.cursor.fetchone()
         while result:
             mass = result[0]
-            self.child_mass_arr[mark:mark+5] /= mass
-            mark += 5
+            self.child_mass_arr[mark:mark+NumNeighbors] /= mass
+            mark += NumNeighbors
             result = self.cursor.fetchone()
         
         # Get the global ID for the SnapHaloID=0 from the child, this will
@@ -642,14 +645,15 @@
                 # We need to get the GlobalHaloID for this child.
                 child_globalID = baseChildID + child
                 child_indexes.append(child_globalID)
-                child_per.append(self.child_mass_arr[i*5 + j])
+                child_per.append(self.child_mass_arr[i*NumNeighbors + j])
             # Sort by percentages, descending.
             child_per, child_indexes = zip(*sorted(zip(child_per, child_indexes), reverse=True))
             values = []
-            for pair in zip(child_indexes, child_per):
+            for pair_count, pair in enumerate(zip(child_indexes, child_per)):
+                if pair_count == NumDB: break
                 values.extend([int(pair[0]), float(pair[1])])
             #values.extend([parent_currt, parent_halo])
-            # This has the child ID, child percent listed five times, followed
+            # This has the child ID, child percent listed NumDB times, followed
             # by the currt and this parent halo ID (SnapHaloID).
             #values = tuple(values)
             self.write_values.append(values)
@@ -841,7 +845,7 @@
          [1609, 0.0]]
         """
         parents = []
-        for i in range(5):
+        for i in range(NumDB):
             string = "SELECT GlobalHaloID, ChildHaloFrac%d FROM Halos\
             WHERE ChildHaloID%d=%d;" % (i, i, GlobalHaloID)
             self.cursor.execute(string)
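
The NumNeighbors/NumDB refactor above keeps the flat child_mass_arr layout
consistent: candidate i's j-th neighbor sits at slot i*NumNeighbors + j.  A
small standalone sketch of that indexing (the halo IDs and mass value below
are made up):

    import numpy as na

    NumNeighbors = 15

    def child_mass_slot(candidate_order, halo, child_rank):
        # candidate_order maps each parent halo ID to its position in the
        # sorted candidate list; its j-th neighbor lands at i*NumNeighbors + j.
        return candidate_order[halo] * NumNeighbors + child_rank

    candidate_order = {10: 0, 42: 1}          # hypothetical parent halo IDs
    child_mass_arr = na.zeros(len(candidate_order) * NumNeighbors,
                              dtype='float64')
    child_mass_arr[child_mass_slot(candidate_order, 42, 3)] += 1.0e12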


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/analysis_modules/halo_profiler/api.py
--- a/yt/analysis_modules/halo_profiler/api.py
+++ b/yt/analysis_modules/halo_profiler/api.py
@@ -34,5 +34,5 @@
 from .multi_halo_profiler import \
     HaloProfiler, \
     FakeProfile, \
-    shift_projections, \
+    get_halo_sphere, \
     standard_fields


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -46,7 +46,8 @@
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface, \
     parallel_blocking_call, \
-    parallel_root_only
+    parallel_root_only, \
+    parallel_objects
 from yt.visualization.fixed_resolution import \
     FixedResolutionBuffer
 from yt.visualization.image_writer import write_image
@@ -66,7 +67,7 @@
                  recenter = None,
                  profile_output_dir='radial_profiles', projection_output_dir='projections',
                  projection_width=8.0, projection_width_units='mpc', project_at_level='max',
-                 velocity_center=['bulk', 'halo'], filter_quantities=['id','center'], 
+                 velocity_center=['bulk', 'halo'], filter_quantities=['id', 'center', 'r_max'], 
                  use_critical_density=False):
         r"""Initialize a Halo Profiler object.
         
@@ -184,7 +185,6 @@
         self._halo_filters = []
         self.all_halos = []
         self.filtered_halos = []
-        self._projection_halo_list = []
 
         # Create output directory if specified
         if self.output_dir is not None:
@@ -351,7 +351,8 @@
             
         """
 
-        self.profile_fields.append({'field':field, 'weight_field':weight_field, 'accumulation':accumulation})
+        self.profile_fields.append({'field':field, 'weight_field':weight_field, 
+                                    'accumulation':accumulation})
 
     def add_projection(self, field, weight_field=None, cmap='algae'):
         r"""Make a projection of the specified field.
@@ -453,7 +454,7 @@
 
         # Profile all halos.
         updated_halos = []
-        for halo in self._get_objs('all_halos', round_robin=True):
+        for halo in parallel_objects(self.all_halos, -1):
             # Apply prefilters to avoid profiling unwanted halos.
             filter_result = True
             haloQuantities = {}
@@ -509,7 +510,7 @@
 
     def _get_halo_profile(self, halo, filename, virial_filter=True,
             force_write=False):
-        """Profile a single halo and write profile data to a file.
+        r"""Profile a single halo and write profile data to a file.
         If file already exists, read profile data from file.
         Return a dictionary of id, center, and virial quantities if virial_filter is True.
         """
@@ -527,39 +528,9 @@
                 mylog.error("Skipping halo with r_max / r_min = %f." % (halo['r_max']/r_min))
                 return None
 
-            sphere = self.pf.h.sphere(halo['center'], halo['r_max']/self.pf.units['mpc'])
-            if len(sphere._grids) == 0: return None
-            new_sphere = False
-
-            if self.recenter:
-                old = halo['center']
-                if self.recenter in centering_registry:
-                    new_x, new_y, new_z = \
-                        centering_registry[self.recenter](sphere)
-                else:
-                    # user supplied function
-                    new_x, new_y, new_z = self.recenter(sphere)
-                if new_x < self.pf.domain_left_edge[0] or \
-                        new_y < self.pf.domain_left_edge[1] or \
-                        new_z < self.pf.domain_left_edge[2]:
-                    mylog.info("Recentering rejected, skipping halo %d" % \
-                        halo['id'])
-                    return None
-                halo['center'] = [new_x, new_y, new_z]
-                d = self.pf['kpc'] * periodic_dist(old, halo['center'],
-                    self.pf.domain_right_edge - self.pf.domain_left_edge)
-                mylog.info("Recentered halo %d %1.3e kpc away." % (halo['id'], d))
-                # Expand the halo to account for recentering. 
-                halo['r_max'] += d / 1000 # d is in kpc -> want mpc
-                new_sphere = True
-
-            if new_sphere:
-                # Temporary solution to memory leak.
-                for g in self.pf.h.grids:
-                    g.clear_data()
-                sphere.clear_data()
-                del sphere
-                sphere = self.pf.h.sphere(halo['center'], halo['r_max']/self.pf.units['mpc'])
+            # get a sphere object to profile
+            sphere = get_halo_sphere(halo, self.pf, recenter=self.recenter)
+            if sphere is None: return None
 
             if self._need_bulk_velocity:
                 # Set bulk velocity to zero out radial velocity profiles.
@@ -567,7 +538,9 @@
                     if self.velocity_center[1] == 'halo':
                         sphere.set_field_parameter('bulk_velocity', halo['velocity'])
                     elif self.velocity_center[1] == 'sphere':
-                        sphere.set_field_parameter('bulk_velocity', sphere.quantities['BulkVelocity'](lazy_reader=False, preload=False))
+                        sphere.set_field_parameter('bulk_velocity', 
+                                                   sphere.quantities['BulkVelocity'](lazy_reader=False, 
+                                                                                     preload=False))
                     else:
                         mylog.error("Invalid parameter: VelocityCenter.")
                 elif self.velocity_center[0] == 'max':
@@ -645,18 +618,18 @@
 
         # Get list of halos for projecting.
         if halo_list == 'filtered':
-            self._halo_projection_list = self.filtered_halos
+            halo_projection_list = self.filtered_halos
         elif halo_list == 'all':
-            self._halo_projection_list = self.all_halos
+            halo_projection_list = self.all_halos
         elif isinstance(halo_list, types.StringType):
-            self._halo_projection_list = self._read_halo_list(halo_list)
+            halo_projection_list = self._read_halo_list(halo_list)
         elif isinstance(halo_list, types.ListType):
-            self._halo_projection_list = halo_list
+            halo_projection_list = halo_list
         else:
             mylog.error("Keyword, halo_list', must be 'filtered', 'all', a filename, or an actual list.")
             return
 
-        if len(self._halo_projection_list) == 0:
+        if len(halo_projection_list) == 0:
             mylog.error("Halo list for projections is empty.")
             return
 
@@ -665,7 +638,8 @@
             proj_level = self.pf.h.max_level
         else:
             proj_level = int(self.project_at_level)
-        proj_dx = self.pf.units[self.projection_width_units] / self.pf.parameters['TopGridDimensions'][0] / \
+        proj_dx = self.pf.units[self.projection_width_units] / \
+            self.pf.parameters['TopGridDimensions'][0] / \
             (self.pf.parameters['RefineBy']**proj_level)
         projectionResolution = int(self.projection_width / proj_dx)
 
@@ -678,21 +652,25 @@
             my_output_dir = "%s/%s" % (self.pf.fullpath, self.projection_output_dir)
         self.__check_directory(my_output_dir)
 
-        center = [0.5 * (self.pf.parameters['DomainLeftEdge'][w] + self.pf.parameters['DomainRightEdge'][w])
+        center = [0.5 * (self.pf.parameters['DomainLeftEdge'][w] + 
+                         self.pf.parameters['DomainRightEdge'][w])
                   for w in range(self.pf.parameters['TopGridRank'])]
 
-        for halo in self._get_objs('_halo_projection_list', round_robin=True):
+        for halo in parallel_objects(halo_projection_list, -1):
             if halo is None:
                 continue
             # Check if region will overlap domain edge.
             # Using non-periodic regions is faster than using periodic ones.
-            leftEdge = [(halo['center'][w] - 0.5 * self.projection_width/self.pf.units[self.projection_width_units])
+            leftEdge = [(halo['center'][w] - 
+                         0.5 * self.projection_width/self.pf.units[self.projection_width_units])
                         for w in range(len(halo['center']))]
-            rightEdge = [(halo['center'][w] + 0.5 * self.projection_width/self.pf.units[self.projection_width_units])
+            rightEdge = [(halo['center'][w] + 
+                          0.5 * self.projection_width/self.pf.units[self.projection_width_units])
                          for w in range(len(halo['center']))]
 
             mylog.info("Projecting halo %04d in region: [%f, %f, %f] to [%f, %f, %f]." %
-                       (halo['id'], leftEdge[0], leftEdge[1], leftEdge[2], rightEdge[0], rightEdge[1], rightEdge[2]))
+                       (halo['id'], leftEdge[0], leftEdge[1], leftEdge[2], 
+                        rightEdge[0], rightEdge[1], rightEdge[2]))
 
             need_per = False
             for w in range(len(halo['center'])):
@@ -719,13 +697,13 @@
                 for hp in self.projection_fields:
                     projections.append(self.pf.h.proj(w, hp['field'], 
                                                       weight_field=hp['weight_field'], 
-                                                      data_source=region, center=halo['center'],
+                                                      source=region, center=halo['center'],
                                                       serialize=False))
                 
                 # Set x and y limits, shift image if it overlaps domain boundary.
                 if need_per:
                     pw = self.projection_width/self.pf.units[self.projection_width_units]
-                    #shift_projections(self.pf, projections, halo['center'], center, w)
+                    _shift_projections(self.pf, projections, halo['center'], center, w)
                     # Projection has now been shifted to center of box.
                     proj_left = [center[x_axis]-0.5*pw, center[y_axis]-0.5*pw]
                     proj_right = [center[x_axis]+0.5*pw, center[y_axis]+0.5*pw]
@@ -756,11 +734,85 @@
                         if save_images:
                             filename = "%s/Halo_%04d_%s_%s.png" % (my_output_dir, halo['id'], 
                                                                    dataset_name, axis_labels[w])
-                            write_image(na.log10(frb[hp['field']]), filename, cmap_name=hp['cmap'])
+                            if (frb[hp['field']] != 0).any():
+                                write_image(na.log10(frb[hp['field']]), filename, cmap_name=hp['cmap'])
+                            else:
+                                mylog.info('Projection of %s for halo %d is all zeros, skipping image.' %
+                                            (hp['field'], halo['id']))
                     if save_cube: output.close()
 
             del region
 
+    @parallel_blocking_call
+    def analyze_halo_spheres(self, analysis_function, halo_list='filtered',
+                             analysis_output_dir=None):
+        r"""Perform custom analysis on all halos.
+        
+        This will loop through all halos on the HaloProfiler's list, 
+        creating a sphere object for each halo and passing that sphere 
+        to the provided analysis function.
+        
+        Parameters
+        ----------
+        analysis_function : function
+            A function taking two arguments, the halo dictionary, and a 
+            sphere object.
+            Example function to calculate total mass of halo:
+                def my_analysis(halo, sphere):
+                    total_mass = sphere.quantities['TotalMass']()
+                    print total_mass
+        halo_list : {'filtered', 'all'}
+            Which set of halos to run the analysis on, either ones passed by the
+            halo filters (if enabled/added), or all halos.
+            Default='filtered'.
+        analysis_output_dir : string, optional
+            If specified, this directory will be created within the dataset to 
+            contain any output from the analysis function.  Default: None.
+
+        Examples
+        --------
+        >>> hp.analyze_halo_spheres(my_analysis, halo_list="filtered",
+                                    analysis_output_dir='special_analysis')
+        
+        """
+
+        # Get list of halos for projecting.
+        if halo_list == 'filtered':
+            halo_analysis_list = self.filtered_halos
+        elif halo_list == 'all':
+            halo_analysis_list = self.all_halos
+        elif isinstance(halo_list, types.StringType):
+            halo_analysis_list = self._read_halo_list(halo_list)
+        elif isinstance(halo_list, types.ListType):
+            halo_analysis_list = halo_list
+        else:
+            mylog.error("Keyword, halo_list', must be 'filtered', 'all', a filename, or an actual list.")
+            return
+
+        if len(halo_analysis_list) == 0:
+            mylog.error("Halo list for analysis is empty.")
+            return
+
+        # Create output directory.
+        if analysis_output_dir is not None:
+            if self.output_dir is not None:
+                self.__check_directory("%s/%s" % (self.output_dir, self.pf.directory))
+                my_output_dir = "%s/%s/%s" % (self.output_dir, self.pf.directory, 
+                                              analysis_output_dir)
+            else:
+                my_output_dir = "%s/%s" % (self.pf.fullpath, analysis_output_dir)
+            self.__check_directory(my_output_dir)
+
+        for halo in parallel_objects(halo_analysis_list, -1):
+            if halo is None: continue
+
+            # Get a sphere object to analyze.
+            sphere = get_halo_sphere(halo, self.pf, recenter=self.recenter)
+            if sphere is None: continue
+
+            # Call the given analysis function.
+            analysis_function(halo, sphere)
+
     def _add_actual_overdensity(self, profile):
         "Calculate overdensity from TotalMassMsun and CellVolume fields."
 
@@ -917,7 +969,8 @@
     def _run_hop(self, hop_file):
         "Run hop to get halos."
 
-        hop_results = self.halo_finder_function(self.pf, *self.halo_finder_args, **self.halo_finder_kwargs)
+        hop_results = self.halo_finder_function(self.pf, *self.halo_finder_args, 
+                                                **self.halo_finder_kwargs)
         hop_results.write_out(hop_file)
 
         del hop_results
@@ -989,7 +1042,95 @@
         else:
             os.mkdir(my_output_dir)
 
-def shift_projections(pf, projections, oldCenter, newCenter, axis):
+def get_halo_sphere(halo, pf, recenter=None):
+    r"""Returns a sphere object for a given halo.
+        
+    With a dictionary containing halo properties, such as center 
+    and r_max, this creates a sphere object and optionally 
+    recenters and recreates the sphere using a recentering function.
+    This is to be used primarily to make spheres for a set of halos 
+    loaded by the HaloProfiler.
+    
+    Parameters
+    ----------
+    halo : dict, required
+        The dictionary containing halo properties used to make the sphere.
+        Required entries:
+            center : list with center coordinates.
+            r_max : sphere radius in Mpc.
+    pf : parameter file object, required
+        The parameter file from which the sphere will be made.
+    recenter : {None, string or function}
+        The exact location of the sphere center can significantly affect 
+        radial profiles.  The halo center loaded by the HaloProfiler will 
+        typically be the dark matter center of mass calculated by a halo 
+        finder.  However, this may not be the best location for centering 
+        profiles of baryon quantities.  For example, one may want to center 
+        on the maximum density.
+        If recenter is given as a string, one of the existing recentering 
+        functions will be used:
+            Min_Dark_Matter_Density : location of minimum dark matter density
+            Max_Dark_Matter_Density : location of maximum dark matter density
+            CoM_Dark_Matter_Density : dark matter center of mass
+            Min_Gas_Density : location of minimum gas density
+            Max_Gas_Density : location of maximum gas density
+            CoM_Gas_Density : gas center of mass
+            Min_Total_Density : location of minimum total density
+            Max_Total_Density : location of maximum total density
+            CoM_Total_Density : total center of mass
+            Min_Temperature : location of minimum temperature
+            Max_Temperature : location of maximum temperature
+        Alternately, a function can be supplied for custom recentering.
+        The function should take only one argument, a sphere object.
+            Example function:
+                def my_center_of_mass(data):
+                   my_x, my_y, my_z = data.quantities['CenterOfMass']()
+                   return (my_x, my_y, my_z)
+
+        Examples: this should primarily be used with the halo list of the HaloProfiler.
+        This is an example with an abstract halo assuming a pre-defined pf.
+        >>> halo = {'center': [0.5, 0.5, 0.5], 'r_max': 1.0}
+        >>> my_sphere = get_halo_sphere(halo, pf, recenter='Max_Gas_Density')
+        >>> # Assuming the above example function has been defined.
+        >>> my_sphere = get_halo_sphere(halo, pf, recenter=my_center_of_mass)
+    """
+        
+    sphere = pf.h.sphere(halo['center'], halo['r_max']/pf.units['mpc'])
+    if len(sphere._grids) == 0: return None
+    new_sphere = False
+
+    if recenter:
+        old = halo['center']
+        if recenter in centering_registry:
+            new_x, new_y, new_z = \
+                centering_registry[recenter](sphere)
+        else:
+            # user supplied function
+            new_x, new_y, new_z = recenter(sphere)
+        if new_x < pf.domain_left_edge[0] or \
+                new_y < pf.domain_left_edge[1] or \
+                new_z < pf.domain_left_edge[2]:
+            mylog.info("Recentering rejected, skipping halo %d" % \
+                halo['id'])
+            return None
+        halo['center'] = [new_x, new_y, new_z]
+        d = pf['kpc'] * periodic_dist(old, halo['center'],
+            pf.domain_right_edge - pf.domain_left_edge)
+        mylog.info("Recentered halo %d %1.3e kpc away." % (halo['id'], d))
+        # Expand the halo to account for recentering. 
+        halo['r_max'] += d / 1000 # d is in kpc -> want mpc
+        new_sphere = True
+
+    if new_sphere:
+        # Temporary solution to memory leak.
+        for g in pf.h.grids:
+            g.clear_data()
+        sphere.clear_data()
+        del sphere
+        sphere = pf.h.sphere(halo['center'], halo['r_max']/pf.units['mpc'])
+    return sphere
+
+def _shift_projections(pf, projections, oldCenter, newCenter, axis):
     """
     Shift projection data around.
     This is necessary when projecting a periodic region.
@@ -1059,14 +1200,19 @@
         add2_y_weight_field = plot['weight_field'][plot['py'] - 0.5 * plot['pdy'] < 0]
 
         # Add the hanging cells back to the projection data.
-        plot.field_data['px'] = na.concatenate([plot['px'], add_x_px, add_y_px, add2_x_px, add2_y_px])
-        plot.field_data['py'] = na.concatenate([plot['py'], add_x_py, add_y_py, add2_x_py, add2_y_py])
-        plot.field_data['pdx'] = na.concatenate([plot['pdx'], add_x_pdx, add_y_pdx, add2_x_pdx, add2_y_pdx])
-        plot.field_data['pdy'] = na.concatenate([plot['pdy'], add_x_pdy, add_y_pdy, add2_x_pdy, add2_y_pdy])
-        plot.field_data[field] = na.concatenate([plot[field], add_x_field, add_y_field, add2_x_field, add2_y_field])
+        plot.field_data['px'] = na.concatenate([plot['px'], add_x_px, add_y_px, 
+                                                add2_x_px, add2_y_px])
+        plot.field_data['py'] = na.concatenate([plot['py'], add_x_py, add_y_py, 
+                                                add2_x_py, add2_y_py])
+        plot.field_data['pdx'] = na.concatenate([plot['pdx'], add_x_pdx, add_y_pdx, 
+                                                 add2_x_pdx, add2_y_pdx])
+        plot.field_data['pdy'] = na.concatenate([plot['pdy'], add_x_pdy, add_y_pdy, 
+                                                 add2_x_pdy, add2_y_pdy])
+        plot.field_data[field] = na.concatenate([plot[field], add_x_field, add_y_field, 
+                                                 add2_x_field, add2_y_field])
         plot.field_data['weight_field'] = na.concatenate([plot['weight_field'],
-                                                    add_x_weight_field, add_y_weight_field, 
-                                                    add2_x_weight_field, add2_y_weight_field])
+                                                          add_x_weight_field, add_y_weight_field, 
+                                                          add2_x_weight_field, add2_y_weight_field])
 
         # Delete original copies of hanging cells.
         del add_x_px, add_y_px, add2_x_px, add2_y_px
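
A hedged usage sketch of the new analyze_halo_spheres entry point, following
the docstring example above.  The dataset path is a placeholder, and it is
assumed that the profiling/filtering pass has already populated the halo list.

    from yt.analysis_modules.halo_profiler.api import HaloProfiler

    def my_analysis(halo, sphere):
        # Same shape as the docstring example: halo dict and sphere object in.
        total_mass = sphere.quantities['TotalMass']()
        print total_mass

    hp = HaloProfiler("DD0010/DD0010")        # hypothetical dataset path
    hp.analyze_halo_spheres(my_analysis, halo_list='filtered',
                            analysis_output_dir='special_analysis')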


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/analysis_modules/star_analysis/sfr_spectrum.py
--- a/yt/analysis_modules/star_analysis/sfr_spectrum.py
+++ b/yt/analysis_modules/star_analysis/sfr_spectrum.py
@@ -96,6 +96,8 @@
             self._pf.current_redshift) # seconds
         # Build the distribution.
         self.build_dist()
+        # Attach some convenience arrays.
+        self.attach_arrays()
 
     def build_dist(self):
         """
@@ -127,6 +129,47 @@
         # We will want the time taken between bins.
         self.time_bins_dt = self.time_bins[1:] - self.time_bins[:-1]
     
+    def attach_arrays(self):
+        """
+        Attach convenience arrays to the class for easy access.
+        """
+        if self.mode == 'data_source':
+            try:
+                vol = self._data_source.volume('mpc')
+            except AttributeError:
+                # If we're here, this is probably a HOPHalo object, and we
+                # can get the volume this way.
+                ds = self._data_source.get_sphere()
+                vol = ds.volume('mpc')
+        elif self.mode == 'provided':
+            vol = self.volume
+        tc = self._pf["Time"]
+        self.time = []
+        self.lookback_time = []
+        self.redshift = []
+        self.Msol_yr = []
+        self.Msol_yr_vol = []
+        self.Msol = []
+        self.Msol_cumulative = []
+        # Use the center of the time_bin, not the left edge.
+        for i, time in enumerate((self.time_bins[1:] + self.time_bins[:-1])/2.):
+            self.time.append(time * tc / YEAR)
+            self.lookback_time.append((self.time_now - time * tc)/YEAR)
+            self.redshift.append(self.cosm.ComputeRedshiftFromTime(time * tc))
+            self.Msol_yr.append(self.mass_bins[i] / \
+                (self.time_bins_dt[i] * tc / YEAR))
+            self.Msol_yr_vol.append(self.mass_bins[i] / \
+                (self.time_bins_dt[i] * tc / YEAR) / vol)
+            self.Msol.append(self.mass_bins[i])
+            self.Msol_cumulative.append(self.cum_mass_bins[i])
+        self.time = na.array(self.time)
+        self.lookback_time = na.array(self.lookback_time)
+        self.redshift = na.array(self.redshift)
+        self.Msol_yr = na.array(self.Msol_yr)
+        self.Msol_yr_vol = na.array(self.Msol_yr_vol)
+        self.Msol = na.array(self.Msol)
+        self.Msol_cumulative = na.array(self.Msol_cumulative)
+    
     def write_out(self, name="StarFormationRate.out"):
         r"""Write out the star analysis to a text file *name*. The columns are in
         order.
@@ -150,31 +193,21 @@
         >>> sfr.write_out("stars-SFR.out")
         """
         fp = open(name, "w")
-        if self.mode == 'data_source':
-            try:
-                vol = self._data_source.volume('mpc')
-            except AttributeError:
-                # If we're here, this is probably a HOPHalo object, and we
-                # can get the volume this way.
-                ds = self._data_source.get_sphere()
-                vol = ds.volume('mpc')
-        elif self.mode == 'provided':
-            vol = self.volume
-        tc = self._pf["Time"]
-        # Use the center of the time_bin, not the left edge.
         fp.write("#time\tlookback\tredshift\tMsol/yr\tMsol/yr/Mpc3\tMsol\tcumMsol\t\n")
-        for i, time in enumerate((self.time_bins[1:] + self.time_bins[:-1])/2.):
+        for i, time in enumerate(self.time):
             line = "%1.5e %1.5e %1.5e %1.5e %1.5e %1.5e %1.5e\n" % \
-            (time * tc / YEAR, # Time
-            (self.time_now - time * tc)/YEAR, # Lookback time
-            self.cosm.ComputeRedshiftFromTime(time * tc), # Redshift
-            self.mass_bins[i] / (self.time_bins_dt[i] * tc / YEAR), # Msol/yr
-            self.mass_bins[i] / (self.time_bins_dt[i] * tc / YEAR) / vol, # Msol/yr/vol
-            self.mass_bins[i], # Msol in bin
-            self.cum_mass_bins[i]) # cumulative
+            (time, # Time
+            self.lookback_time[i], # Lookback time
+            self.redshift[i], # Redshift
+            self.Msol_yr[i], # Msol/yr
+            self.Msol_yr_vol[i], # Msol/yr/vol
+            self.Msol[i], # Msol in bin
+            self.Msol_cumulative[i]) # cumulative
             fp.write(line)
         fp.close()
 
+### Begin Synthetic Spectrum Stuff. ####
+
 CHABRIER = {
 "Z0001" : "bc2003_hr_m22_chab_ssp.ised.h5", #/* 0.5% */
 "Z0004" : "bc2003_hr_m32_chab_ssp.ised.h5", #/* 2% */


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -42,6 +42,7 @@
     __global_parallel_size = '1',
     __topcomm_parallel_rank = '0',
     __topcomm_parallel_size = '1',
+    __command_line = 'False',
     storeparameterfiles = 'False',
     parameterfilestore = 'parameter_files.csv',
     maximumstoredpfs = '500',


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -54,6 +54,8 @@
     TrilinearFieldInterpolator
 from yt.utilities.parameter_file_storage import \
     ParameterFileStore
+from yt.utilities.minimal_representation import \
+    MinimalProjectionData
 
 from .derived_quantities import DerivedQuantityCollection
 from .field_info_container import \
@@ -89,6 +91,20 @@
         return tr
     return save_state
 
+def restore_field_information_state(func):
+    """
+    A decorator that takes a function with the API of (self, grid, field)
+    and ensures that after the function is called, the field_parameters will
+    be returned to normal.
+    """
+    def save_state(self, grid, field=None, *args, **kwargs):
+        old_params = grid.field_parameters
+        grid.field_parameters = self.field_parameters
+        tr = func(self, grid, field, *args, **kwargs)
+        grid.field_parameters = old_params
+        return tr
+    return save_state
+
 def cache_mask(func):
     """
     For computationally intensive indexing operations, we can cache
@@ -212,7 +228,7 @@
         self._point_indices = {}
         self._vc_data = {}
         for key, val in kwargs.items():
-            mylog.info("Setting %s to %s", key, val)
+            mylog.debug("Setting %s to %s", key, val)
             self.set_field_parameter(key, val)
 
     def __set_default_field_parameters(self):
@@ -382,9 +398,10 @@
                      [self.field_parameters])
         return (_reconstruct_object, args)
 
-    def __repr__(self):
+    def __repr__(self, clean = False):
         # We'll do this the slow way to be clear what's going on
-        s = "%s (%s): " % (self.__class__.__name__, self.pf)
+        if clean: s = "%s: " % (self.__class__.__name__)
+        else: s = "%s (%s): " % (self.__class__.__name__, self.pf)
         s += ", ".join(["%s=%s" % (i, getattr(self,i))
                        for i in self._con_args])
         return s
@@ -811,6 +828,38 @@
             self[field] = temp_data[field]
 
     def to_frb(self, width, resolution, center = None):
+        r"""This function returns a FixedResolutionBuffer generated from this
+        object.
+
+        A FixedResolutionBuffer is an object that accepts a variable-resolution
+        2D object and transforms it into an NxM bitmap that can be plotted,
+        examined or processed.  This is a convenience function to return an FRB
+        directly from an existing 2D data object.
+
+        Parameters
+        ----------
+        width : width specifier
+            This can either be a floating point value, in the native domain
+            units of the simulation, or a tuple of the (value, unit) style.
+            This will be the width of the FRB.
+        resolution : int or tuple of ints
+            The number of pixels on a side of the final FRB.
+        center : array-like of floats, optional
+            The center of the FRB.  If not specified, defaults to the center of
+            the current object.
+
+        Returns
+        -------
+        frb : :class:`~yt.visualization.fixed_resolution.FixedResolutionBuffer`
+            A fixed resolution buffer, which can be queried for fields.
+
+        Examples
+        --------
+
+        >>> proj = pf.h.proj(0, "Density")
+        >>> frb = proj.to_frb( (100.0, 'kpc'), 1024)
+        >>> write_image(na.log10(frb["Density"]), 'density_100kpc.png')
+        """
         if center is None:
             center = self.get_field_parameter("center")
             if center is None:
@@ -1221,6 +1270,52 @@
         return "%s/c%s_L%s" % \
             (self._top_node, cen_name, L_name)
 
+    def to_frb(self, width, resolution):
+        r"""This function returns an ObliqueFixedResolutionBuffer generated
+        from this object.
+
+        An ObliqueFixedResolutionBuffer is an object that accepts a
+        variable-resolution 2D object and transforms it into an NxM bitmap that
+        can be plotted, examined or processed.  This is a convenience function
+        to return an FRB directly from an existing 2D data object.  Unlike the
+        corresponding to_frb function for other AMR2DData objects, this does
+        not accept a 'center' parameter as it is assumed to be centered at the
+        center of the cutting plane.
+
+        Parameters
+        ----------
+        width : width specifier
+            This can either be a floating point value, in the native domain
+            units of the simulation, or a tuple of the (value, unit) style.
+            This will be the width of the FRB.
+        resolution : int or tuple of ints
+            The number of pixels on a side of the final FRB.
+
+        Returns
+        -------
+        frb : :class:`~yt.visualization.fixed_resolution.ObliqueFixedResolutionBuffer`
+            A fixed resolution buffer, which can be queried for fields.
+
+        Examples
+        --------
+
+        >>> v, c = pf.h.find_max("Density")
+        >>> sp = pf.h.sphere(c, (100.0, 'au'))
+        >>> L = sp.quantities["AngularMomentumVector"]()
+        >>> cutting = pf.h.cutting(L, c)
+        >>> frb = cutting.to_frb( (1.0, 'pc'), 1024)
+        >>> write_image(na.log10(frb["Density"]), 'density_1pc.png')
+        """
+        if iterable(width):
+            w, u = width
+            width = w/self.pf[u]
+        if not iterable(resolution):
+            resolution = (resolution, resolution)
+        from yt.visualization.fixed_resolution import ObliqueFixedResolutionBuffer
+        bounds = (-width/2.0, width/2.0, -width/2.0, width/2.0)
+        frb = ObliqueFixedResolutionBuffer(self, bounds, resolution)
+        return frb
+
 class AMRFixedResCuttingPlaneBase(AMR2DData):
     """
     AMRFixedResCuttingPlaneBase is an oblique plane through the data,
@@ -1516,6 +1611,10 @@
         self._refresh_data()
         if self._okay_to_serialize and self.serialize: self._serialize(node_name=self._node_name)
 
+    @property
+    def _mrep(self):
+        return MinimalProjectionData(self)
+
     def _convert_field_name(self, field):
         if field == "weight_field": return "weight_field_%s" % self._weight
         if field in self._key_fields: return field
@@ -2443,14 +2542,8 @@
         verts = []
         samples = []
         for i, g in enumerate(self._get_grid_objs()):
-            mask = self._get_cut_mask(g) * g.child_mask
-            vals = g.get_vertex_centered_data(field)
-            if sample_values is not None:
-                svals = g.get_vertex_centered_data(sample_values)
-            else:
-                svals = None
-            my_verts = march_cubes_grid(value, vals, mask, g.LeftEdge, g.dds,
-                                        svals)
+            my_verts = self._extract_isocontours_from_grid(
+                            g, field, value, sample_values)
             if sample_values is not None:
                 my_verts, svals = my_verts
                 samples.append(svals)
@@ -2477,6 +2570,20 @@
             return verts, samples
         return verts
 
+
+    @restore_grid_state
+    def _extract_isocontours_from_grid(self, grid, field, value,
+                                       sample_values = None):
+        mask = self._get_cut_mask(grid) * grid.child_mask
+        vals = grid.get_vertex_centered_data(field)
+        if sample_values is not None:
+            svals = grid.get_vertex_centered_data(sample_values)
+        else:
+            svals = None
+        my_verts = march_cubes_grid(value, vals, mask, grid.LeftEdge,
+                                    grid.dds, svals)
+        return my_verts
+
     def calculate_isocontour_flux(self, field, value,
                     field_x, field_y, field_z, fluxing_field = None):
         r"""This identifies isocontours on a cell-by-cell basis, with no
@@ -2543,19 +2650,25 @@
         """
         flux = 0.0
         for g in self._get_grid_objs():
-            mask = self._get_cut_mask(g) * g.child_mask
-            vals = g.get_vertex_centered_data(field)
-            if fluxing_field is None:
-                ff = na.ones(vals.shape, dtype="float64")
-            else:
-                ff = g.get_vertex_centered_data(fluxing_field)
-            xv, yv, zv = [g.get_vertex_centered_data(f) for f in 
-                         [field_x, field_y, field_z]]
-            flux += march_cubes_grid_flux(value, vals, xv, yv, zv,
-                        ff, mask, g.LeftEdge, g.dds)
+            flux += self._calculate_flux_in_grid(g, field, value,
+                    field_x, field_y, field_z, fluxing_field)
         flux = self.comm.mpi_allreduce(flux, op="sum")
         return flux
 
+    @restore_grid_state
+    def _calculate_flux_in_grid(self, grid, field, value,
+                    field_x, field_y, field_z, fluxing_field = None):
+        mask = self._get_cut_mask(grid) * grid.child_mask
+        vals = grid.get_vertex_centered_data(field)
+        if fluxing_field is None:
+            ff = na.ones(vals.shape, dtype="float64")
+        else:
+            ff = grid.get_vertex_centered_data(fluxing_field)
+        xv, yv, zv = [grid.get_vertex_centered_data(f) for f in 
+                     [field_x, field_y, field_z]]
+        return march_cubes_grid_flux(value, vals, xv, yv, zv,
+                    ff, mask, grid.LeftEdge, grid.dds)
+
     def extract_connected_sets(self, field, num_levels, min_val, max_val,
                                 log_space=True, cumulative=True, cache=False):
         """
@@ -2855,12 +2968,6 @@
                  & (r <= self._radius))
         return cm
 
-    def volume(self, unit="unitary"):
-        """
-        Return the volume of the cylinder in units of *unit*.
-        """
-        return math.pi * (self._radius)**2. * self._height * pf[unit]**3
-
 class AMRInclinedBox(AMR3DData):
     _type_name="inclined_box"
     _con_args = ('origin','box_vectors')
@@ -3430,7 +3537,7 @@
                                    output_field, output_left)
             self.field_data[field] = output_field
 
-    @restore_grid_state
+    @restore_field_information_state
     def _get_data_from_grid(self, grid, fields):
         fields = ensure_list(fields)
         g_fields = [grid[field].astype("float64") for field in fields]
@@ -3523,6 +3630,19 @@
                     self._some_overlap.append(grid)
                     continue
     
+    def __repr__(self):
+        # We'll do this the slow way to be clear what's going on
+        s = "%s (%s): " % (self.__class__.__name__, self.pf)
+        s += "["
+        for i, region in enumerate(self.regions):
+            if region in ["OR", "AND", "NOT", "(", ")"]:
+                s += region
+            else:
+                s += region.__repr__(clean = True)
+            if i < (len(self.regions) - 1): s += ", "
+        s += "]"
+        return s
+    
     def _is_fully_enclosed(self, grid):
         return (grid in self._all_overlap)
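
The new restore_field_information_state decorator follows the same
save/swap/restore pattern as restore_grid_state.  A standalone sketch of that
pattern (not the commit's code; the name is hypothetical and a try/finally is
used here so the parameters are restored even if the wrapped call raises):

    def borrow_field_parameters(func):
        # Swap the data object's field_parameters onto the grid for the
        # duration of the call, then hand the grid its own parameters back.
        def save_state(self, grid, field=None, *args, **kwargs):
            old_params = grid.field_parameters
            grid.field_parameters = self.field_parameters
            try:
                return func(self, grid, field, *args, **kwargs)
            finally:
                grid.field_parameters = old_params
        return save_state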
 


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -133,7 +133,6 @@
             if weight:
                 f[u] /= w[u]
             self[field] = f
-        self["myweight"] = w
         self["UsedBins"] = u
 
     def add_fields(self, fields, weight = "CellMassMsun", accumulation = False, fractional=False):


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -37,6 +37,8 @@
     output_type_registry
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, NullFunc
+from yt.utilities.minimal_representation import \
+    MinimalStaticOutput
 
 # We want to support the movie format in the future.
 # When such a thing comes to pass, I'll move all the stuff that is constant up
@@ -115,6 +117,10 @@
         except ImportError:
             return s.replace(";", "*")
 
+    @property
+    def _mrep(self):
+        return MinimalStaticOutput(self)
+
     @classmethod
     def _is_valid(cls, *args, **kwargs):
         return False


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -882,6 +882,8 @@
 def _convertVorticitySquared(data):
     return data.convert("cm")**-2.0
 add_field("VorticitySquared", function=_VorticitySquared,
-          validators=[ValidateSpatial(1)],
+          validators=[ValidateSpatial(1,
+              ["x-velocity","y-velocity","z-velocity"])],
           units=r"\rm{s}^{-2}",
           convert_function=_convertVorticitySquared)
+


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -484,6 +484,15 @@
         if self.num_grids > 40:
             starter = na.random.randint(0, 20)
             random_sample = na.mgrid[starter:len(self.grids)-1:20j].astype("int32")
+            # We also add in a bit to make sure that some of the grids have
+            # particles
+            gwp = self.grid_particle_count > 0
+            if na.any(gwp) and not na.any(gwp[(random_sample,)]):
+                # We just add one grid.  This is not terribly efficient.
+                first_grid = na.where(gwp)[0][0]
+                random_sample.resize((21,))
+                random_sample[-1] = first_grid
+                mylog.debug("Added additional grid %s", first_grid)
             mylog.debug("Checking grids: %s", random_sample.tolist())
         else:
             random_sample = na.mgrid[0:max(len(self.grids)-1,1)].astype("int32")
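
A standalone sketch of the sampling tweak above, with a hypothetical helper
name: if none of the randomly chosen grids carries particles, the first grid
that does is appended so the field-detection pass still sees particle fields.
The grid_particle_count argument is assumed to be a flat per-grid array.

    import numpy as na

    def sample_with_particles(num_grids, grid_particle_count, starter=0):
        sample = na.mgrid[starter:num_grids - 1:20j].astype("int32")
        gwp = grid_particle_count > 0
        if na.any(gwp) and not na.any(gwp[(sample,)]):
            # No sampled grid has particles; append the first one that does.
            sample = na.append(sample, na.where(gwp)[0][0])
        return sample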


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -296,11 +296,12 @@
 def _dmpdensity(field, data):
     blank = na.zeros(data.ActiveDimensions, dtype='float32')
     if data.NumberOfParticles == 0: return blank
-    if 'creation_time' in data.keys():
+    if 'creation_time' in data.pf.field_info:
         filter = data['creation_time'] <= 0.0
         if not filter.any(): return blank
     else:
         filter = na.ones(data.NumberOfParticles, dtype='bool')
+    if not filter.any(): return blank
     amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(na.float64),
                            data["particle_position_y"][filter].astype(na.float64),
                            data["particle_position_z"][filter].astype(na.float64),


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -316,6 +316,13 @@
             self.current_time = \
                 float(self._find_parameter("real", "time", scalar=True))
 
+        if self._flash_version == 7:
+            self.parameters['timestep'] = float(
+                self._handle["simulation parameters"]["timestep"])
+        else:
+            self.parameters['timestep'] = \
+                float(self._find_parameter("real", "dt", scalar=True))
+
         try:
             use_cosmo = self._find_parameter("logical", "usecosmology") 
         except:


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/frontends/gdf/api.py
--- a/yt/frontends/gdf/api.py
+++ b/yt/frontends/gdf/api.py
@@ -1,13 +1,14 @@
 """
-API for yt.frontends.chombo
+API for yt.frontends.gdf
 
+Author: Samuel W. Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
 Author: Matthew Turk <matthewturk at gmail.com>
 Affiliation: UCSD
 Author: J.S. Oishi <jsoishi at gmail.com>
 Affiliation: KIPAC/SLAC/Stanford
 Author: Britton Smith <brittonsmith at gmail.com>
 Affiliation: MSU
-Homepage: http://yt.Chombotools.org/
 License:
   Copyright (C) 2010-2011 Matthew Turk.  All Rights Reserved.
 


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -1,12 +1,15 @@
 """
-Data structures for Chombo.
+Data structures for GDF.
 
+Author: Samuel W. Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
 Author: Matthew Turk <matthewturk at gmail.com>
 Author: J. S. Oishi <jsoishi at gmail.com>
 Affiliation: KIPAC/SLAC/Stanford
 Homepage: http://yt-project.org/
 License:
-  Copyright (C) 2008-2011 Matthew Turk, J. S. Oishi.  All Rights Reserved.
+  Copyright (C) 2008-2011 Samuel W. Skillman, Matthew Turk, J. S. Oishi.  
+  All Rights Reserved.
 
   This file is part of yt.
 
@@ -76,7 +79,7 @@
         # for now, the hierarchy file is the parameter file!
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.directory = os.path.dirname(self.hierarchy_filename)
-        self._fhandle = h5py.File(self.hierarchy_filename)
+        self._fhandle = h5py.File(self.hierarchy_filename,'r')
         AMRHierarchy.__init__(self,pf,data_style)
 
         self._fhandle.close()
@@ -94,31 +97,31 @@
 
     def _count_grids(self):
         self.num_grids = self._fhandle['/grid_parent_id'].shape[0]
-        
+       
     def _parse_hierarchy(self):
         f = self._fhandle 
-        
-        # this relies on the first Group in the H5 file being
-        # 'Chombo_global'
-        levels = f.listnames()[1:]
         dxs=[]
         self.grids = na.empty(self.num_grids, dtype='object')
-        for i, grid in enumerate(f['data'].keys()):
-            self.grids[i] = self.grid(i, self, f['grid_level'][i],
-                                      f['grid_left_index'][i],
-                                      f['grid_dimensions'][i])
-            self.grids[i]._level_id = f['grid_level'][i]
+        levels = (f['grid_level'][:]).copy()
+        glis = (f['grid_left_index'][:]).copy()
+        gdims = (f['grid_dimensions'][:]).copy()
+        for i in range(levels.shape[0]):
+            self.grids[i] = self.grid(i, self, levels[i],
+                                      glis[i],
+                                      gdims[i])
+            self.grids[i]._level_id = levels[i]
 
             dx = (self.parameter_file.domain_right_edge-
                   self.parameter_file.domain_left_edge)/self.parameter_file.domain_dimensions
-            dx = dx/self.parameter_file.refine_by**(f['grid_level'][i])
+            dx = dx/self.parameter_file.refine_by**(levels[i])
             dxs.append(dx)
         dx = na.array(dxs)
-        self.grid_left_edge = self.parameter_file.domain_left_edge + dx*f['grid_left_index'][:]
-        self.grid_dimensions = f['grid_dimensions'][:].astype("int32")
+        self.grid_left_edge = self.parameter_file.domain_left_edge + dx*glis
+        self.grid_dimensions = gdims.astype("int32")
         self.grid_right_edge = self.grid_left_edge + dx*self.grid_dimensions
         self.grid_particle_count = f['grid_particle_count'][:]
-
+        del levels, glis, gdims
+ 
     def _populate_grid_objects(self):
         for g in self.grids:
             g._prepare_grid()
@@ -130,9 +133,6 @@
                 g1.Parent.append(g)
         self.max_level = self.grid_levels.max()
 
-    def _setup_unknown_fields(self):
-        pass
-
     def _setup_derived_fields(self):
         self.derived_field_list = []
 
@@ -171,7 +171,11 @@
         # This should be improved.
         self._handle = h5py.File(self.parameter_filename, "r")
         for field_name in self._handle["/field_types"]:
-            self.units[field_name] = self._handle["/field_types/%s" % field_name].attrs['field_to_cgs']
+            try:
+                self.units[field_name] = self._handle["/field_types/%s" % field_name].attrs['field_to_cgs']
+            except:
+                self.units[field_name] = 1.0
+
         self._handle.close()
         del self._handle
         
@@ -181,7 +185,9 @@
         self.domain_left_edge = sp["domain_left_edge"][:]
         self.domain_right_edge = sp["domain_right_edge"][:]
         self.domain_dimensions = sp["domain_dimensions"][:]
-        self.refine_by = sp["refine_by"]
+        refine_by = sp["refine_by"]
+        if refine_by is None: refine_by = 2
+        self.refine_by = refine_by 
         self.dimensionality = sp["dimensionality"]
         self.current_time = sp["current_time"]
         self.unique_identifier = sp["unique_identifier"]
@@ -198,6 +204,7 @@
         else:
             self.current_redshift = self.omega_lambda = self.omega_matter = \
                 self.hubble_constant = self.cosmological_simulation = 0.0
+        self.parameters['Time'] = 1.0 # Hardcode time conversion for now.
         self.parameters["HydroMethod"] = 0 # Hardcode for now until field staggering is supported.
         self._handle.close()
         del self._handle
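
A standalone numerical sketch of the hierarchy arithmetic above (all values
are made-up examples): left edges come from the integer grid_left_index times
the level-dependent cell width, and right edges follow from grid_dimensions.

    import numpy as na

    domain_left_edge  = na.array([0.0, 0.0, 0.0])
    domain_right_edge = na.array([1.0, 1.0, 1.0])
    domain_dimensions = na.array([32, 32, 32])
    refine_by = 2

    levels = na.array([0, 1])                        # grid_level
    glis   = na.array([[0, 0, 0], [16, 16, 16]])     # grid_left_index
    gdims  = na.array([[32, 32, 32], [32, 32, 32]])  # grid_dimensions

    dx = (domain_right_edge - domain_left_edge) / domain_dimensions
    dxs = na.array([dx / refine_by**lev for lev in levels])
    grid_left_edge  = domain_left_edge + dxs * glis
    grid_right_edge = grid_left_edge + dxs * gdims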


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/frontends/gdf/fields.py
--- a/yt/frontends/gdf/fields.py
+++ b/yt/frontends/gdf/fields.py
@@ -1,11 +1,14 @@
 """
 GDF-specific fields
 
+Author: Samuel W. Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
 Author: J. S. Oishi <jsoishi at gmail.com>
 Affiliation: KIPAC/SLAC/Stanford
 Homepage: http://yt-project.org/
 License:
-  Copyright (C) 2009-2011 J. S. Oishi, Matthew Turk.  All Rights Reserved.
+  Copyright (C) 2008-2011 Samuel W. Skillman, Matthew Turk, J. S. Oishi.  
+  All Rights Reserved.
 
   This file is part of yt.
 
@@ -53,40 +56,31 @@
 add_gdf_field = KnownGDFFields.add_field
 
 add_gdf_field("density", function=NullFunc, take_log=True,
-          validators = [ValidateDataField("density")],
           units=r"\rm{g}/\rm{cm}^3",
           projected_units =r"\rm{g}/\rm{cm}^2")
 
 add_gdf_field("specific_energy", function=NullFunc, take_log=True,
-          validators = [ValidateDataField("specific_energy")],
           units=r"\rm{erg}/\rm{g}")
 
 add_gdf_field("pressure", function=NullFunc, take_log=True,
-          validators = [ValidateDataField("pressure")],
           units=r"\rm{erg}/\rm{g}")
 
-add_gdf_field("velocity_x", function=NullFunc, take_log=True,
-          validators = [ValidateDataField("velocity_x")],
+add_gdf_field("velocity_x", function=NullFunc, take_log=False,
           units=r"\rm{cm}/\rm{s}")
 
 add_gdf_field("velocity_y", function=NullFunc, take_log=False,
-          validators = [ValidateDataField("velocity_y")],
           units=r"\rm{cm}/\rm{s}")
 
 add_gdf_field("velocity_z", function=NullFunc, take_log=False,
-          validators = [ValidateDataField("velocity_z")],
           units=r"\rm{cm}/\rm{s}")
 
 add_gdf_field("mag_field_x", function=NullFunc, take_log=False,
-          validators = [ValidateDataField("mag_field_x")],
           units=r"\rm{cm}/\rm{s}")
 
 add_gdf_field("mag_field_y", function=NullFunc, take_log=False,
-          validators = [ValidateDataField("mag_field_y")],
           units=r"\rm{cm}/\rm{s}")
 
 add_gdf_field("mag_field_z", function=NullFunc, take_log=False,
-          validators = [ValidateDataField("mag_field_z")],
           units=r"\rm{cm}/\rm{s}")
 
 for f,v in log_translation_dict.items():


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/frontends/gdf/io.py
--- a/yt/frontends/gdf/io.py
+++ b/yt/frontends/gdf/io.py
@@ -1,6 +1,8 @@
 """
 The data-file handling functions
 
+Author: Samuel W. Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
 Author: Matthew Turk <matthewturk at gmail.com>
 Author: J. S. Oishi <jsoishi at gmail.com>
 Affiliation: KIPAC/SLAC/Stanford
@@ -35,38 +37,33 @@
     def _field_dict(self,fhandle):
         keys = fhandle['field_types'].keys()
         val = fhandle['field_types'].keys()
-        # ncomp = int(fhandle['/'].attrs['num_components'])
-        # temp =  fhandle['/'].attrs.listitems()[-ncomp:]
-        # val, keys = zip(*temp)
-        # val = [int(re.match('component_(\d+)',v).groups()[0]) for v in val]
         return dict(zip(keys,val))
         
     def _read_field_names(self,grid):
         fhandle = h5py.File(grid.filename,'r')
-        return fhandle['field_types'].keys()
+        names = fhandle['field_types'].keys()
+        fhandle.close()
+        return names
     
     def _read_data_set(self,grid,field):
         fhandle = h5py.File(grid.hierarchy.hierarchy_filename,'r')
-        return fhandle['/data/grid_%010i/'%grid.id+field][:]
-        # field_dict = self._field_dict(fhandle)
-        # lstring = 'level_%i' % grid.Level
-        # lev = fhandle[lstring]
-        # dims = grid.ActiveDimensions
-        # boxsize = dims.prod()
-        
-        # grid_offset = lev[self._offset_string][grid._level_id]
-        # start = grid_offset+field_dict[field]*boxsize
-        # stop = start + boxsize
-        # data = lev[self._data_string][start:stop]
-
-        # return data.reshape(dims, order='F')
-                                          
+        data = (fhandle['/data/grid_%010i/'%grid.id+field][:]).copy()
+        fhandle.close()
+        if grid.pf.field_ordering == 1:
+            return data.T
+        else:
+            return data
 
     def _read_data_slice(self, grid, field, axis, coord):
         sl = [slice(None), slice(None), slice(None)]
         sl[axis] = slice(coord, coord + 1)
+        if grid.pf.field_ordering == 1:
+            sl.reverse()
         fhandle = h5py.File(grid.hierarchy.hierarchy_filename,'r')
-        return fhandle['/data/grid_%010i/'%grid.id+field][:][sl]
+        data = (fhandle['/data/grid_%010i/'%grid.id+field][:][sl]).copy()
+        fhandle.close()
+        if grid.pf.field_ordering == 1:
+            return data.T
+        else:
+            return data
 
-    # return self._read_data_set(grid,field)[sl]
-
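
A hedged standalone sketch of the read path above: the grid data group layout
comes from the diff, while the field_ordering flag is assumed to mean
1 = Fortran (column-major) on disk, so the array is transposed into C order
on the way out and the file handle is closed either way.

    import h5py

    def read_grid_field(hierarchy_filename, grid_id, field, field_ordering=0):
        fhandle = h5py.File(hierarchy_filename, 'r')
        data = (fhandle['/data/grid_%010i/' % grid_id + field][:]).copy()
        fhandle.close()
        if field_ordering == 1:
            return data.T   # Fortran-ordered on disk -> transpose to C order
        return data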


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -24,42 +24,15 @@
 """
 
 import time, types, signal, inspect, traceback, sys, pdb, os
-import warnings, struct
+import warnings, struct, subprocess
 from math import floor, ceil
 
 from yt.utilities.exceptions import *
 from yt.utilities.logger import ytLogger as mylog
 import yt.utilities.progressbar as pb
 import yt.utilities.rpdb as rpdb
-
-# Some compatibility functions.  In the long run, these *should* disappear as
-# we move toward newer python versions.  Most were implemented to get things
-# running on DataStar.
-
-# If we're running on python2.4, we need a 'wraps' function
-def blank_wrapper(f):
-    return lambda a: a
-
-try:
-    from functools import wraps
-except ImportError:
-    wraps = blank_wrapper
-
-# We need to ensure that we have a defaultdict implementation
-
-class __defaultdict(dict):
-    def __init__(self, func):
-        self.__func = func
-        dict.__init__(self)
-    def __getitem__(self, key):
-        if not self.has_key(key):
-            self.__setitem__(key, self.__func())
-        return dict.__getitem__(self, key)
-
-try:
-    from collections import defaultdict
-except ImportError:
-    defaultdict = __defaultdict
+from collections import defaultdict
+from functools import wraps
 
 # Some functions for handling sequences and other types
 
@@ -78,7 +51,7 @@
     string to a list, for instance ensuring the *fields* as an argument is a
     list.
     """
-    if obj == None:
+    if obj is None:
         return [obj]
     if not isinstance(obj, types.ListType):
         return [obj]
@@ -385,18 +358,6 @@
 def signal_ipython(signo, frame):
     insert_ipython(2)
 
-# We use two signals, SIGUSR1 and SIGUSR2.  In a non-threaded environment,
-# we set up handlers to process these by printing the current stack and to
-# raise a RuntimeError.  The latter can be used, inside pdb, to catch an error
-# and then examine the current stack.
-try:
-    signal.signal(signal.SIGUSR1, signal_print_traceback)
-    mylog.debug("SIGUSR1 registered for traceback printing")
-    signal.signal(signal.SIGUSR2, signal_ipython)
-    mylog.debug("SIGUSR2 registered for IPython Insertion")
-except ValueError:  # Not in main thread
-    pass
-
 def paste_traceback(exc_type, exc, tb):
     """
     This is a traceback handler that knows how to paste to the pastebin.
@@ -450,29 +411,6 @@
     dec_s = ''.join([ chr(ord(a) ^ ord(b)) for a, b in zip(enc_s, itertools.cycle(key)) ])
     print dec_s
 
-# If we recognize one of the arguments on the command line as indicating a
-# different mechanism for handling tracebacks, we attach one of those handlers
-# and remove the argument from sys.argv.
-#
-# This fallback is for Paraview:
-if not hasattr(sys, 'argv') or sys.argv is None: sys.argv = []
-# Now, we check.
-if "--paste" in sys.argv:
-    sys.excepthook = paste_traceback
-    del sys.argv[sys.argv.index("--paste")]
-elif "--paste-detailed" in sys.argv:
-    sys.excepthook = paste_traceback_detailed
-    del sys.argv[sys.argv.index("--paste-detailed")]
-elif "--detailed" in sys.argv:
-    import cgitb; cgitb.enable(format="text")
-    del sys.argv[sys.argv.index("--detailed")]
-elif "--rpdb" in sys.argv:
-    sys.excepthook = rpdb.rpdb_excepthook
-    del sys.argv[sys.argv.index("--rpdb")]
-elif "--detailed" in sys.argv:
-    import cgitb; cgitb.enable(format="text")
-    del sys.argv[sys.argv.index("--detailed")]
-
 #
 # Some exceptions
 #
@@ -482,3 +420,103 @@
 
 class YTEmptyClass(object):
     pass
+
+def update_hg(path, skip_rebuild = False):
+    from mercurial import hg, ui, commands
+    f = open(os.path.join(path, "yt_updater.log"), "a")
+    u = ui.ui()
+    u.pushbuffer()
+    config_fn = os.path.join(path, ".hg", "hgrc")
+    print "Reading configuration from ", config_fn
+    u.readconfig(config_fn)
+    repo = hg.repository(u, path)
+    commands.pull(u, repo)
+    f.write(u.popbuffer())
+    f.write("\n\n")
+    u.pushbuffer()
+    commands.identify(u, repo)
+    if "+" in u.popbuffer():
+        print "Can't rebuild modules by myself."
+        print "You will have to do this yourself.  Here are some sample commands:"
+        print
+        print "    $ cd %s" % (path)
+        print "    $ hg up"
+        print "    $ %s setup.py develop" % (sys.executable)
+        return 1
+    print "Updating the repository"
+    f.write("Updating the repository\n\n")
+    commands.update(u, repo, check=True)
+    if skip_rebuild: return
+    f.write("Rebuilding modules\n\n")
+    p = subprocess.Popen([sys.executable, "setup.py", "build_ext", "-i"], cwd=path,
+                        stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
+    stdout, stderr = p.communicate()
+    f.write(stdout)
+    f.write("\n\n")
+    if p.returncode:
+        print "BROKEN: See %s" % (os.path.join(path, "yt_updater.log"))
+        sys.exit(1)
+    f.write("Successful!\n")
+    print "Updated successfully."
+
+def get_hg_version(path):
+    from mercurial import hg, ui, commands
+    u = ui.ui()
+    u.pushbuffer()
+    repo = hg.repository(u, path)
+    commands.identify(u, repo)
+    return u.popbuffer()
+
+def get_yt_version():
+    import pkg_resources
+    yt_provider = pkg_resources.get_provider("yt")
+    path = os.path.dirname(yt_provider.module_path)
+    version = get_hg_version(path)[:12]
+    return version
+
+# This code snippet is modified from Georg Brandl
+def bb_apicall(endpoint, data, use_pass = True):
+    import urllib, urllib2, getpass, base64
+    uri = 'https://api.bitbucket.org/1.0/%s/' % endpoint
+    # since bitbucket doesn't return the required WWW-Authenticate header when
+    # making a request without Authorization, we cannot use the standard urllib2
+    # auth handlers; we have to add the requisite header from the start
+    if data is not None:
+        data = urllib.urlencode(data)
+    req = urllib2.Request(uri, data)
+    if use_pass:
+        username = raw_input("Bitbucket Username? ")
+        password = getpass.getpass()
+        upw = '%s:%s' % (username, password)
+        req.add_header('Authorization', 'Basic %s' % base64.b64encode(upw).strip())
+    return urllib2.urlopen(req).read()
+
+def get_yt_supp():
+    supp_path = os.path.join(os.environ["YT_DEST"], "src",
+                             "yt-supplemental")
+    # Now we check that the supplemental repository is checked out.
+    if not os.path.isdir(supp_path):
+        print
+        print "*** The yt-supplemental repository is not checked ***"
+        print "*** out.  I can do this for you, but because this ***"
+        print "*** is a delicate act, I require you to respond   ***"
+        print "*** to the prompt with the word 'yes'.            ***"
+        print
+        response = raw_input("Do you want me to try to check it out? ")
+        if response != "yes":
+            print
+            print "Okay, I understand.  You can check it out yourself."
+            print "This command will do it:"
+            print
+            print "$ hg clone http://hg.yt-project.org/yt-supplemental/ ",
+            print "%s" % (supp_path)
+            print
+            sys.exit(1)
+        from mercurial import ui, commands
+        uu = ui.ui()
+        rv = commands.clone(uu,
+                "http://hg.yt-project.org/yt-supplemental/", supp_path)
+        if rv:
+            print "Something has gone wrong.  Quitting."
+            sys.exit(1)
+    # Now we think we have our supplemental repository.
+    return supp_path
+
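
A hypothetical use of the new updater helpers, assuming yt lives in a
Mercurial checkout (the path below is made up):

    import os
    from yt.funcs import get_hg_version, update_hg

    path = os.path.expanduser("~/yt-hg")       # assumed checkout location
    print get_hg_version(path)[:12]            # short changeset hash
    update_hg(path, skip_rebuild=True)         # pull + update, skip "build_ext -i"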


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/gui/reason/html/images/3d.png
Binary file yt/gui/reason/html/images/3d.png has changed


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/gui/reason/html/images/3d_tab.png
Binary file yt/gui/reason/html/images/3d_tab.png has changed


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/gui/reason/html/images/binary.png
Binary file yt/gui/reason/html/images/binary.png has changed


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/gui/reason/html/images/blockdevice.png
Binary file yt/gui/reason/html/images/blockdevice.png has changed


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/gui/reason/html/images/blockdevice_tab.png
Binary file yt/gui/reason/html/images/blockdevice_tab.png has changed


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/gui/reason/html/images/console.png
Binary file yt/gui/reason/html/images/console.png has changed


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/gui/reason/html/images/double_down.png
Binary file yt/gui/reason/html/images/double_down.png has changed


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/gui/reason/html/images/double_down_sm.png
Binary file yt/gui/reason/html/images/double_down_sm.png has changed


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/gui/reason/html/images/double_left.png
Binary file yt/gui/reason/html/images/double_left.png has changed


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/gui/reason/html/images/double_left_sm.png
Binary file yt/gui/reason/html/images/double_left_sm.png has changed


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/gui/reason/html/images/double_right.png
Binary file yt/gui/reason/html/images/double_right.png has changed


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/gui/reason/html/images/double_right_sm.png
Binary file yt/gui/reason/html/images/double_right_sm.png has changed


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/gui/reason/html/images/double_up.png
Binary file yt/gui/reason/html/images/double_up.png has changed


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/gui/reason/html/images/double_up_sm.png
Binary file yt/gui/reason/html/images/double_up_sm.png has changed


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/gui/reason/html/images/graph.png
Binary file yt/gui/reason/html/images/graph.png has changed


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/gui/reason/html/images/kivio_flw.png
Binary file yt/gui/reason/html/images/kivio_flw.png has changed


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/gui/reason/html/images/single_down.png
Binary file yt/gui/reason/html/images/single_down.png has changed


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/gui/reason/html/images/single_down_sm.png
Binary file yt/gui/reason/html/images/single_down_sm.png has changed


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/gui/reason/html/images/single_left.png
Binary file yt/gui/reason/html/images/single_left.png has changed


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/gui/reason/html/images/single_left_sm.png
Binary file yt/gui/reason/html/images/single_left_sm.png has changed


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/gui/reason/html/images/single_right.png
Binary file yt/gui/reason/html/images/single_right.png has changed


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/gui/reason/html/images/single_right_sm.png
Binary file yt/gui/reason/html/images/single_right_sm.png has changed


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/gui/reason/html/images/single_up.png
Binary file yt/gui/reason/html/images/single_up.png has changed


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/gui/reason/html/images/single_up_sm.png
Binary file yt/gui/reason/html/images/single_up_sm.png has changed


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/gui/reason/html/images/upload.png
Binary file yt/gui/reason/html/images/upload.png has changed


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/gui/reason/html/leaflet/images/marker-shadow.png
Binary file yt/gui/reason/html/leaflet/images/marker-shadow.png has changed


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/gui/reason/html/leaflet/images/marker.png
Binary file yt/gui/reason/html/leaflet/images/marker.png has changed


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/gui/reason/html/leaflet/images/popup-close.png
Binary file yt/gui/reason/html/leaflet/images/popup-close.png has changed


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/gui/reason/html/leaflet/images/zoom-in.png
Binary file yt/gui/reason/html/leaflet/images/zoom-in.png has changed


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/gui/reason/html/leaflet/images/zoom-out.png
Binary file yt/gui/reason/html/leaflet/images/zoom-out.png has changed


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -35,6 +35,13 @@
 import numpy as na # For historical reasons
 import numpy # In case anyone wishes to use it by name
 
+# This next import handles most of the actual startup procedures; it also
+# parses the command line and sets up the global state of various
+# operations.
+
+import yt.startup_tasks as __startup_tasks
+unparsed_args = __startup_tasks.unparsed_args
+
 from yt.funcs import *
 from yt.utilities.logger import ytLogger as mylog
 from yt.utilities.performance_counters import yt_counters, time_function
@@ -108,7 +115,7 @@
     PlotCollection, PlotCollectionInteractive, \
     get_multi_plot, FixedResolutionBuffer, ObliqueFixedResolutionBuffer, \
     callback_registry, write_bitmap, write_image, annotate_image, \
-    apply_colormap, scale_image
+    apply_colormap, scale_image, write_projection
 
 from yt.visualization.volume_rendering.api import \
     ColorTransferFunction, PlanckTransferFunction, ProjectionTransferFunction, \
@@ -122,6 +129,10 @@
 
 from yt.convenience import all_pfs, max_spheres, load, projload
 
+# Import some helpful math utilities
+from yt.utilities.math_utils import \
+    ortho_find, quartiles
+
 
 # We load plugins.  Keep in mind, this can be fairly dangerous -
 # the primary purpose is to allow people to have a set of functions
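
Since startup_tasks now consumes yt's own options before user code runs, a
script can hand the leftovers straight to its own parser.  A small sketch
(the --output option is purely illustrative):

    from yt.mods import *
    import argparse

    # unparsed_args holds everything yt's own parser did not recognize.
    parser = argparse.ArgumentParser()
    parser.add_argument("--output", default="out.png")
    args = parser.parse_args(unparsed_args)
    print args.output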


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/startup_tasks.py
--- /dev/null
+++ b/yt/startup_tasks.py
@@ -0,0 +1,144 @@
+"""
+Startup tasks for yt: parse the command line, install traceback and signal
+handlers, and detect whether we are running under MPI.
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+# This handles the command line.
+
+import argparse, os, sys
+
+from yt.config import ytcfg
+from yt.funcs import *
+
+exe_name = os.path.basename(sys.executable)
+# At import time, we determine whether or not we're being run in parallel.
+def turn_on_parallelism():
+    try:
+        from mpi4py import MPI
+        parallel_capable = (MPI.COMM_WORLD.size > 1)
+    except ImportError:
+        parallel_capable = False
+    if parallel_capable:
+        mylog.info("Global parallel computation enabled: %s / %s",
+                   MPI.COMM_WORLD.rank, MPI.COMM_WORLD.size)
+        ytcfg["yt","__global_parallel_rank"] = str(MPI.COMM_WORLD.rank)
+        ytcfg["yt","__global_parallel_size"] = str(MPI.COMM_WORLD.size)
+        ytcfg["yt","__parallel"] = "True"
+        if exe_name == "embed_enzo" or \
+            ("_parallel" in dir(sys) and sys._parallel == True):
+            ytcfg["yt","inline"] = "True"
+        # I believe we do not need to turn this off manually
+        #ytcfg["yt","StoreParameterFiles"] = "False"
+        # Now let's make sure we have the right options set.
+        if MPI.COMM_WORLD.rank > 0:
+            if ytcfg.getboolean("yt","LogFile"):
+                ytcfg["yt","LogFile"] = "False"
+                from yt.utilities.logger import disable_file_logging
+                disable_file_logging()
+    return parallel_capable
+
+# We use two signals, SIGUSR1 and SIGUSR2.  In a non-threaded environment,
+# we set up handlers to process these by printing the current stack and to
+# raise a RuntimeError.  The latter can be used, inside pdb, to catch an error
+# and then examine the current stack.
+try:
+    signal.signal(signal.SIGUSR1, signal_print_traceback)
+    mylog.debug("SIGUSR1 registered for traceback printing")
+    signal.signal(signal.SIGUSR2, signal_ipython)
+    mylog.debug("SIGUSR2 registered for IPython Insertion")
+except ValueError:  # Not in main thread
+    pass
+
+class SetExceptionHandling(argparse.Action):
+    def __call__(self, parser, namespace, values, option_string = None):
+        # If one of these options is given on the command line, we attach the
+        # corresponding traceback handler.  (argparse consumes the option, so
+        # there is no longer any need to strip it from sys.argv by hand.)
+        #
+        if self.dest == "paste":
+            sys.excepthook = paste_traceback
+            mylog.debug("Enabling traceback pasting")
+        elif self.dest == "paste_detailed":
+            sys.excepthook = paste_traceback_detailed
+            mylog.debug("Enabling detailed traceback pasting")
+        elif self.dest == "detailed":
+            import cgitb; cgitb.enable(format="text")
+            mylog.debug("Enabling detailed traceback reporting")
+        elif self.dest == "rpdb":
+            sys.excepthook = rpdb.rpdb_excepthook
+            mylog.debug("Enabling remote debugging")
+
+class SetConfigOption(argparse.Action):
+    def __call__(self, parser, namespace, values, option_string = None):
+        param, val = values.split("=")
+        mylog.debug("Overriding config: %s = %s", param, val)
+        ytcfg["yt",param] = val
+
+parser = argparse.ArgumentParser(description = 'yt command line arguments')
+parser.add_argument("--config", action=SetConfigOption,
+    help = "Set configuration option, in the form param=value")
+parser.add_argument("--paste", action=SetExceptionHandling,
+    help = "Paste traceback to paste.yt-project.org", nargs = 0)
+parser.add_argument("--paste-detailed", action=SetExceptionHandling,
+    help = "Paste a detailed traceback with local variables to " +
+           "paste.yt-project.org", nargs = 0)
+parser.add_argument("--detailed", action=SetExceptionHandling,
+    help = "Display detailed traceback.", nargs = 0)
+parser.add_argument("--rpdb", action=SetExceptionHandling,
+    help = "Enable remote pdb interaction (for parallel debugging).", nargs = 0)
+parser.add_argument("--parallel", action="store_true", default=False,
+    dest = "parallel",
+    help = "Run in MPI-parallel mode (must be launched as an MPI task)")
+# This fallback is for Paraview (sys.argv may not be defined there):
+if not hasattr(sys, 'argv') or sys.argv is None: sys.argv = []
+
+unparsed_args = []
+
+parallel_capable = False
+if not ytcfg.getboolean("yt","__command_line"):
+    opts, unparsed_args = parser.parse_known_args()
+    # THIS IS NOT SUCH A GOOD IDEA:
+    #sys.argv = [a for a in unparsed_args]
+    if opts.parallel:
+        parallel_capable = turn_on_parallelism()
+else:
+    subparsers = parser.add_subparsers(title="subcommands",
+                        dest='subcommands',
+                        description="Valid subcommands",)
+    def print_help(*args, **kwargs):
+        parser.print_help()
+    help_parser = subparsers.add_parser("help", help="Print help message")
+    help_parser.set_defaults(func=print_help)
+
+
+if parallel_capable:
+    pass
+elif exe_name in \
+        ["mpi4py", "embed_enzo",
+         "python"+sys.version[:3]+"-mpi"] \
+    or '_parallel' in dir(sys) \
+    or any(["ipengine" in arg for arg in sys.argv]):
+    parallel_capable = turn_on_parallelism()
+else:
+    parallel_capable = False
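
The recognized options are stripped out by parse_known_args and everything
else is handed back to the calling script.  A self-contained sketch of that
behavior (this simplified parser uses a plain "append" action for --config
rather than the SetConfigOption action above):

    import argparse

    parser = argparse.ArgumentParser(description="yt command line arguments")
    parser.add_argument("--parallel", action="store_true", default=False)
    parser.add_argument("--config", action="append", default=[],
                        help="Set configuration option, in the form param=value")

    # yt's options are consumed; unknown arguments are returned untouched.
    opts, unparsed = parser.parse_known_args(
        ["--parallel", "--config", "loglevel=10", "my_script_arg"])
    print opts.parallel     # True
    print opts.config       # ['loglevel=10']
    print unparsed          # ['my_script_arg']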


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/utilities/_amr_utils/VolumeIntegrator.pyx
--- a/yt/utilities/_amr_utils/VolumeIntegrator.pyx
+++ b/yt/utilities/_amr_utils/VolumeIntegrator.pyx
@@ -65,6 +65,9 @@
     double log2(double x)
     long int lrint(double x)
     double fabs(double x)
+    double cos(double x)
+    double sin(double x)
+    double asin(double x)
 
 cdef struct Triangle:
     Triangle *next
@@ -238,6 +241,33 @@
         tr[i] = ipnest
     return tr
 
+def arr_fisheye_vectors(int resolution, np.float64_t fov):
+    # We now follow figures 4-7 of:
+    # http://paulbourke.net/miscellaneous/domefisheye/fisheye/
+    # ...but all in Cython.
+    cdef np.ndarray[np.float64_t, ndim=3] vp
+    cdef int i, j, k
+    cdef np.float64_t r, phi, theta, px, py
+    cdef np.float64_t pi = 3.14159265358979323846
+    cdef np.float64_t fov_rad = fov * pi / 180.0
+    vp = np.zeros((resolution, resolution, 3), dtype="float64")
+    for i in range(resolution):
+        px = 2.0 * i / (resolution) - 1.0
+        for j in range(resolution):
+            py = 2.0 * j / (resolution) - 1.0
+            r = (px*px + py*py)**0.5
+            if r == 0.0:
+                phi = 0.0
+            elif px < 0:
+                phi = pi - asin(py / r)
+            else:
+                phi = asin(py / r)
+            theta = r * fov_rad / 2.0
+            vp[i,j,0] = sin(theta) * cos(phi)
+            vp[i,j,1] = sin(theta) * sin(phi)
+            vp[i,j,2] = cos(theta)
+    return vp
+
 cdef class star_kdtree_container:
     cdef kdtree_utils.kdtree *tree
     cdef public np.float64_t sigma
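
A vectorized NumPy restatement of the fisheye mapping above, for reference
only (the Cython loop is what actually gets used):

    import numpy as np

    def fisheye_vectors(resolution, fov):
        fov_rad = fov * np.pi / 180.0
        # Normalized image-plane coordinates in [-1, 1].
        px, py = np.mgrid[0:resolution, 0:resolution]
        px = 2.0 * px / resolution - 1.0
        py = 2.0 * py / resolution - 1.0
        r = np.sqrt(px * px + py * py)
        safe_r = np.where(r == 0.0, 1.0, r)          # avoid divide-by-zero
        phi = np.where(px < 0, np.pi - np.arcsin(py / safe_r),
                       np.arcsin(py / safe_r))
        phi = np.where(r == 0.0, 0.0, phi)
        theta = r * fov_rad / 2.0
        # Unit view vectors: (sin t cos p, sin t sin p, cos t).
        vp = np.empty((resolution, resolution, 3), dtype="float64")
        vp[..., 0] = np.sin(theta) * np.cos(phi)
        vp[..., 1] = np.sin(theta) * np.sin(phi)
        vp[..., 2] = np.cos(theta)
        return vp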


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/utilities/_amr_utils/misc_utilities.pyx
--- a/yt/utilities/_amr_utils/misc_utilities.pyx
+++ b/yt/utilities/_amr_utils/misc_utilities.pyx
@@ -45,17 +45,18 @@
     cdef int i
     cdef np.float64_t mi = 1e100, ma = -1e100, v
     cdef int np = px.shape[0]
-    for i in range(np):
-        v = value[i]
-        if v < mi or v > ma:
-            if px[i] + pdx[i] < leftx: continue
-            if px[i] - pdx[i] > rightx: continue
-            if py[i] + pdy[i] < lefty: continue
-            if py[i] - pdy[i] > righty: continue
-            if pdx[i] < mindx or pdy[i] < mindx: continue
-            if maxdx > 0 and (pdx[i] > maxdx or pdy[i] > maxdx): continue
-            if v < mi: mi = v
-            if v > ma: ma = v
+    with nogil:
+        for i in range(np):
+            v = value[i]
+            if v < mi or v > ma:
+                if px[i] + pdx[i] < leftx: continue
+                if px[i] - pdx[i] > rightx: continue
+                if py[i] + pdy[i] < lefty: continue
+                if py[i] - pdy[i] > righty: continue
+                if pdx[i] < mindx or pdy[i] < mindx: continue
+                if maxdx > 0 and (pdx[i] > maxdx or pdy[i] > maxdx): continue
+                if v < mi: mi = v
+                if v > ma: ma = v
     return (mi, ma)
 
 @cython.boundscheck(False)


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/utilities/answer_testing/__init__.py
--- a/yt/utilities/answer_testing/__init__.py
+++ b/yt/utilities/answer_testing/__init__.py
@@ -24,7 +24,7 @@
 """
 
 import runner, output_tests
-from runner import RegressionTestRunner, run_main
+from runner import RegressionTestRunner
 
 from output_tests import RegressionTest, SingleOutputTest, \
     MultipleOutputTest, YTStaticOutputTest, create_test


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/utilities/answer_testing/api.py
--- a/yt/utilities/answer_testing/api.py
+++ b/yt/utilities/answer_testing/api.py
@@ -31,7 +31,6 @@
 from .runner import \
     RegressionTestRunner, \
     RegressionTestStorage, \
-    run_main, \
     clear_registry, \
     registry_entries
 


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/utilities/answer_testing/runner.py
--- a/yt/utilities/answer_testing/runner.py
+++ b/yt/utilities/answer_testing/runner.py
@@ -27,8 +27,8 @@
 import os, shelve, cPickle, sys, imp, tempfile
 
 from yt.config import ytcfg; ytcfg["yt","serialize"] = "False"
-import yt.utilities.cmdln as cmdln
 from yt.funcs import *
+from yt.utilities.command_line import YTCommand
 from .xunit import Xunit
 
 from output_tests import test_registry, MultipleOutputTest, \
@@ -169,98 +169,22 @@
             print "Running '%s'" % (test_name)
             self.run_test(line.strip())
 
-class EnzoTestRunnerCommands(cmdln.Cmdln):
-    name = "enzo_tests"
+def _load_modules(test_modules):
+    for fn in test_modules:
+        if fn.endswith(".py"): fn = fn[:-3]
+        print "Loading module %s" % (fn)
+        mname = os.path.basename(fn)
+        f, filename, desc = imp.find_module(mname, [os.path.dirname(fn)])
+        project = imp.load_module(mname, f, filename, desc)
 
-    def _load_modules(self, test_modules):
-        for fn in test_modules:
-            if fn.endswith(".py"): fn = fn[:-3]
-            print "Loading module %s" % (fn)
-            mname = os.path.basename(fn)
-            f, filename, desc = imp.find_module(mname, [os.path.dirname(fn)])
-            project = imp.load_module(mname, f, filename, desc)
-
-    def _update_io_log(self, opts, kwargs):
-        if opts.datasets is None or len(opts.datasets) == 0: return
-        f = tempfile.NamedTemporaryFile()
-        kwargs['io_log'] = f.name
-        for d in opts.datasets:
-            fn = os.path.expanduser(d)
-            print "Registered dataset %s" % fn
-            f.write("DATASET WRITTEN %s\n" % fn)
-        f.flush()
-        f.seek(0)
-        return f
-
-    @cmdln.option("-f", "--dataset", action="append",
-                  help="override the io_log and add this to the new one",
-                  dest="datasets")
-    @cmdln.option("-p", "--results-path", action="store",
-                  help="which directory should results be stored in",
-                  dest="results_path", default=".")
-    def do_store(self, subcmd, opts, name, *test_modules):
-        """
-        ${cmd_name}: Run and store a new dataset.
-
-        ${cmd_usage}
-        ${cmd_option_list}
-        """
-        sys.path.insert(0, ".")
-        self._load_modules(test_modules)
-        kwargs = {}
-        f = self._update_io_log(opts, kwargs)
-        test_runner = RegressionTestRunner(name,
-                results_path = opts.results_path,
-                **kwargs)
-        test_runner.run_all_tests()
-
-    @cmdln.option("-o", "--output", action="store",
-                  help="output results to file",
-                  dest="outputfile", default=None)
-    @cmdln.option("-p", "--results-path", action="store",
-                  help="which directory should results be stored in",
-                  dest="results_path", default=".")
-    @cmdln.option("-n", "--nose", action="store_true",
-                  help="run through nose with xUnit testing",
-                  dest="run_nose", default=False)
-    @cmdln.option("-f", "--dataset", action="append",
-                  help="override the io_log and add this to the new one",
-                  dest="datasets")
-    def do_compare(self, subcmd, opts, reference, comparison, *test_modules):
-        """
-        ${cmd_name}: Compare a reference dataset against a new dataset.  The
-        new dataset will be run regardless of whether it exists or not.
-
-        ${cmd_usage}
-        ${cmd_option_list}
-        """
-        if comparison == "__CURRENT__":
-            import pkg_resources
-            yt_provider = pkg_resources.get_provider("yt")
-            path = os.path.dirname(yt_provider.module_path)
-            from yt.utilities.command_line import _get_hg_version
-            comparison = _get_hg_version(path)[:12]
-            print "Setting comparison to: %s" % (comparison)
-        sys.path.insert(0, ".")
-        self._load_modules(test_modules)
-        kwargs = {}
-        f = self._update_io_log(opts, kwargs)
-        test_runner = RegressionTestRunner(comparison, reference,
-                            results_path=opts.results_path,
-                            **kwargs)
-        if opts.run_nose:
-            test_runner.watcher = Xunit()
-        results = test_runner.run_all_tests()
-        if opts.run_nose:
-            test_runner.watcher.report()
-        if opts.outputfile is not None:
-            f = open(str(opts.outputfile), "w")
-            for testname, success in sorted(results.items()):
-                f.write("%s %s\n" % (testname.ljust(100), success))
-
-def run_main():
-    etrc = EnzoTestRunnerCommands()
-    sys.exit(etrc.main())
-
-if __name__ == "__main__":
-    run_main()
+def _update_io_log(opts, kwargs):
+    if opts.datasets is None or len(opts.datasets) == 0: return
+    f = tempfile.NamedTemporaryFile()
+    kwargs['io_log'] = f.name
+    for d in opts.datasets:
+        fn = os.path.expanduser(d)
+        print "Registered dataset %s" % fn
+        f.write("DATASET WRITTEN %s\n" % fn)
+    f.flush()
+    f.seek(0)
+    return f
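
With the cmdln front end gone, the store/compare workflow from the removed
subcommands can be reproduced by driving RegressionTestRunner directly.  A
rough sketch (the test module name is made up):

    import sys
    from yt.utilities.answer_testing.runner import \
        RegressionTestRunner, _load_modules

    sys.path.insert(0, ".")
    _load_modules(["./my_tests.py"])   # registers tests from a hypothetical module

    # Store a reference set of results under a name.
    RegressionTestRunner("reference", results_path=".").run_all_tests()

    # Run again and compare against the stored reference.
    results = RegressionTestRunner("current", "reference",
                                   results_path=".").run_all_tests()
    for name, success in sorted(results.items()):
        print name.ljust(100), success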


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/utilities/cmdln.py
--- a/yt/utilities/cmdln.py
+++ /dev/null
@@ -1,1586 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2002-2007 ActiveState Software Inc.
-# License: MIT (see LICENSE.txt for license details)
-# Author:  Trent Mick
-# Home:    http://trentm.com/projects/cmdln/
-
-"""An improvement on Python's standard cmd.py module.
-
-As with cmd.py, this module provides "a simple framework for writing
-line-oriented command interpreters."  This module provides a 'RawCmdln'
-class that fixes some design flaws in cmd.Cmd, making it more scalable
-and nicer to use for good 'cvs'- or 'svn'-style command line interfaces
-or simple shells.  And it provides a 'Cmdln' class that adds
-optparse-based option processing. Basically you use it like this:
-
-    import cmdln
-
-    class MySVN(cmdln.Cmdln):
-        name = "svn"
-
-        @cmdln.alias('stat', 'st')
-        @cmdln.option('-v', '--verbose', action='store_true'
-                      help='print verbose information')
-        def do_status(self, subcmd, opts, *paths):
-            print "handle 'svn status' command"
-
-        #...
-
-    if __name__ == "__main__":
-        shell = MySVN()
-        retval = shell.main()
-        sys.exit(retval)
-
-See the README.txt or <http://trentm.com/projects/cmdln/> for more
-details.
-"""
-
-__version_info__ = (1, 1, 2)
-__version__ = '.'.join(map(str, __version_info__))
-
-import os
-import sys
-import re
-import cmd
-import optparse
-from pprint import pprint
-import sys
-
-
-
-
-#---- globals
-
-LOOP_ALWAYS, LOOP_NEVER, LOOP_IF_EMPTY = range(3)
-
-# An unspecified optional argument when None is a meaningful value.
-_NOT_SPECIFIED = ("Not", "Specified")
-
-# Pattern to match a TypeError message from a call that
-# failed because of incorrect number of arguments (see
-# Python/getargs.c).
-_INCORRECT_NUM_ARGS_RE = re.compile(
-    r"(takes [\w ]+ )(\d+)( arguments? \()(\d+)( given\))")
-
-
-
-#---- exceptions
-
-class CmdlnError(Exception):
-    """A cmdln.py usage error."""
-    def __init__(self, msg):
-        self.msg = msg
-    def __str__(self):
-        return self.msg
-
-class CmdlnUserError(Exception):
-    """An error by a user of a cmdln-based tool/shell."""
-    pass
-
-
-
-#---- public methods and classes
-
-def alias(*aliases):
-    """Decorator to add aliases for Cmdln.do_* command handlers.
-    
-    Example:
-        class MyShell(cmdln.Cmdln):
-            @cmdln.alias("!", "sh")
-            def do_shell(self, argv):
-                #...implement 'shell' command
-    """
-    def decorate(f):
-        if not hasattr(f, "aliases"):
-            f.aliases = []
-        f.aliases += aliases
-        return f
-    return decorate
-
-
-class RawCmdln(cmd.Cmd):
-    """An improved (on cmd.Cmd) framework for building multi-subcommand
-    scripts (think "svn" & "cvs") and simple shells (think "pdb" and
-    "gdb").
-
-    A simple example:
-
-        import cmdln
-
-        class MySVN(cmdln.RawCmdln):
-            name = "svn"
-
-            @cmdln.aliases('stat', 'st')
-            def do_status(self, argv):
-                print "handle 'svn status' command"
-
-        if __name__ == "__main__":
-            shell = MySVN()
-            retval = shell.main()
-            sys.exit(retval)
-
-    See <http://trentm.com/projects/cmdln> for more information.
-    """
-    name = None      # if unset, defaults basename(sys.argv[0])
-    prompt = None    # if unset, defaults to self.name+"> "
-    version = None   # if set, default top-level options include --version
-
-    # Default messages for some 'help' command error cases.
-    # They are interpolated with one arg: the command.
-    nohelp = "no help on '%s'"
-    unknowncmd = "unknown command: '%s'"
-
-    helpindent = '' # string with which to indent help output
-
-    def __init__(self, completekey='tab', 
-                 stdin=None, stdout=None, stderr=None):
-        """Cmdln(completekey='tab', stdin=None, stdout=None, stderr=None)
-
-        The optional argument 'completekey' is the readline name of a
-        completion key; it defaults to the Tab key. If completekey is
-        not None and the readline module is available, command completion
-        is done automatically.
-        
-        The optional arguments 'stdin', 'stdout' and 'stderr' specify
-        alternate input, output and error output file objects; if not
-        specified, sys.* are used.
-        
-        If 'stdout' but not 'stderr' is specified, stdout is used for
-        error output. This is to provide least surprise for users used
-        to only the 'stdin' and 'stdout' options with cmd.Cmd.
-        """
-        import sys
-        if self.name is None:
-            self.name = os.path.basename(sys.argv[0])
-        if self.prompt is None:
-            self.prompt = self.name+"> "
-        self._name_str = self._str(self.name)
-        self._prompt_str = self._str(self.prompt)
-        if stdin is not None:
-            self.stdin = stdin
-        else:
-            self.stdin = sys.stdin
-        if stdout is not None:
-            self.stdout = stdout
-        else:
-            self.stdout = sys.stdout
-        if stderr is not None:
-            self.stderr = stderr
-        elif stdout is not None:
-            self.stderr = stdout
-        else:
-            self.stderr = sys.stderr
-        self.cmdqueue = []
-        self.completekey = completekey
-        self.cmdlooping = False
-
-    def get_optparser(self):
-        """Hook for subclasses to set the option parser for the
-        top-level command/shell.
-
-        This option parser is retrieved and used by `.main()' to
-        handle top-level options.
-
-        The default implements a single '-h|--help' option. Sub-classes
-        can return None to have no options at the top-level. Typically
-        an instance of CmdlnOptionParser should be returned.
-        """
-        version = (self.version is not None 
-                    and "%s %s" % (self._name_str, self.version)
-                    or None)
-        return CmdlnOptionParser(self, version=version)
-
-    def postoptparse(self):
-        """Hook method executed just after `.main()' parses top-level
-        options.
-
-        When called `self.options' holds the results of the option parse.
-        """
-        pass
-
-    def main(self, argv=None, loop=LOOP_NEVER):
-        """A possible mainline handler for a script, like so:
-
-            import cmdln
-            class MyCmd(cmdln.Cmdln):
-                name = "mycmd"
-                ...
-            
-            if __name__ == "__main__":
-                MyCmd().main()
-
-        By default this will use sys.argv to issue a single command to
-        'MyCmd', then exit. The 'loop' argument can be used to control
-        interactive shell behaviour.
-        
-        Arguments:
-            "argv" (optional, default sys.argv) is the command to run.
-                It must be a sequence, where the first element is the
-                command name and subsequent elements the args for that
-                command.
-            "loop" (optional, default LOOP_NEVER) is a constant
-                indicating if a command loop should be started (i.e. an
-                interactive shell). Valid values (constants on this module):
-                    LOOP_ALWAYS     start loop and run "argv", if any
-                    LOOP_NEVER      run "argv" (or .emptyline()) and exit
-                    LOOP_IF_EMPTY   run "argv", if given, and exit;
-                                    otherwise, start loop
-        """
-        if argv is None:
-            import sys
-            argv = sys.argv
-        else:
-            argv = argv[:] # don't modify caller's list
-
-        self.optparser = self.get_optparser()
-        if self.optparser: # i.e. optparser=None means don't process for opts
-            try:
-                self.options, args = self.optparser.parse_args(argv[1:])
-            except CmdlnUserError, ex:
-                msg = "%s: %s\nTry '%s help' for info.\n"\
-                      % (self.name, ex, self.name)
-                self.stderr.write(self._str(msg))
-                self.stderr.flush()
-                return 1
-            except StopOptionProcessing, ex:
-                return 0
-        else:
-            self.options, args = None, argv[1:]
-        self.postoptparse()
-
-        if loop == LOOP_ALWAYS:
-            if args:
-                self.cmdqueue.append(args)
-            return self.cmdloop()
-        elif loop == LOOP_NEVER:
-            if args:
-                return self.cmd(args)
-            else:
-                return self.emptyline()
-        elif loop == LOOP_IF_EMPTY:
-            if args:
-                return self.cmd(args)
-            else:
-                return self.cmdloop()
-
-    def cmd(self, argv):
-        """Run one command and exit.
-        
-            "argv" is the arglist for the command to run. argv[0] is the
-                command to run. If argv is an empty list then the
-                'emptyline' handler is run.
-
-        Returns the return value from the command handler.
-        """
-        assert isinstance(argv, (list, tuple)), \
-                "'argv' is not a sequence: %r" % argv
-        retval = None
-        try:
-            argv = self.precmd(argv)
-            retval = self.onecmd(argv)
-            self.postcmd(argv)
-        except:
-            if not self.cmdexc(argv):
-                raise
-            retval = 1
-        return retval
-
-    def _str(self, s):
-        """Safely convert the given str/unicode to a string for printing."""
-        try:
-            return str(s)
-        except UnicodeError:
-            #XXX What is the proper encoding to use here? 'utf-8' seems
-            #    to work better than "getdefaultencoding" (usually
-            #    'ascii'), on OS X at least.
-            #import sys
-            #return s.encode(sys.getdefaultencoding(), "replace")
-            return s.encode("utf-8", "replace")
-
-    def cmdloop(self, intro=None):
-        """Repeatedly issue a prompt, accept input, parse into an argv, and
-        dispatch (via .precmd(), .onecmd() and .postcmd()), passing them
-        the argv. In other words, start a shell.
-        
-            "intro" (optional) is an introductory message to print when
-                starting the command loop. This overrides the class
-                "intro" attribute, if any.
-        """
-        self.cmdlooping = True
-        self.preloop()
-        if self.use_rawinput and self.completekey:
-            try:
-                import readline
-                self.old_completer = readline.get_completer()
-                readline.set_completer(self.complete)
-                readline.parse_and_bind(self.completekey+": complete")
-            except ImportError:
-                pass
-        try:
-            if intro is None:
-                intro = self.intro
-            if intro:
-                intro_str = self._str(intro)
-                self.stdout.write(intro_str+'\n')
-            self.stop = False
-            retval = None
-            while not self.stop:
-                if self.cmdqueue:
-                    argv = self.cmdqueue.pop(0)
-                    assert isinstance(argv, (list, tuple)), \
-                            "item on 'cmdqueue' is not a sequence: %r" % argv
-                else:
-                    if self.use_rawinput:
-                        try:
-                            line = raw_input(self._prompt_str)
-                        except EOFError:
-                            line = 'EOF'
-                    else:
-                        self.stdout.write(self._prompt_str)
-                        self.stdout.flush()
-                        line = self.stdin.readline()
-                        if not len(line):
-                            line = 'EOF'
-                        else:
-                            line = line[:-1] # chop '\n'
-                    argv = line2argv(line)
-                try:
-                    argv = self.precmd(argv)
-                    retval = self.onecmd(argv)
-                    self.postcmd(argv)
-                except:
-                    if not self.cmdexc(argv):
-                        raise
-                    retval = 1
-                self.lastretval = retval
-            self.postloop()
-        finally:
-            if self.use_rawinput and self.completekey:
-                try:
-                    import readline
-                    readline.set_completer(self.old_completer)
-                except ImportError:
-                    pass
-        self.cmdlooping = False
-        return retval
-
-    def precmd(self, argv):
-        """Hook method executed just before the command argv is
-        interpreted, but after the input prompt is generated and issued.
-
-            "argv" is the cmd to run.
-            
-        Returns an argv to run (i.e. this method can modify the command
-        to run).
-        """
-        return argv
-
-    def postcmd(self, argv):
-        """Hook method executed just after a command dispatch is finished.
-        
-            "argv" is the command that was run.
-        """
-        pass
-
-    def cmdexc(self, argv):
-        """Called if an exception is raised in any of precmd(), onecmd(),
-        or postcmd(). If True is returned, the exception is deemed to have
-        been dealt with. Otherwise, the exception is re-raised.
-
-        The default implementation handles CmdlnUserError's, which
-        typically correspond to user error in calling commands (as
-        opposed to programmer error in the design of the script using
-        cmdln.py).
-        """
-        import sys
-        type, exc, traceback = sys.exc_info()
-        if isinstance(exc, CmdlnUserError):
-            msg = "%s %s: %s\nTry '%s help %s' for info.\n"\
-                  % (self.name, argv[0], exc, self.name, argv[0])
-            self.stderr.write(self._str(msg))
-            self.stderr.flush()
-            return True
-
-    def onecmd(self, argv):
-        if not argv:
-            return self.emptyline()
-        self.lastcmd = argv
-        cmdname = self._get_canonical_cmd_name(argv[0])
-        if cmdname:
-            handler = self._get_cmd_handler(cmdname)
-            if handler:
-                return self._dispatch_cmd(handler, argv)
-        return self.default(argv)
-
-    def _dispatch_cmd(self, handler, argv):
-        return handler(argv)
-
-    def default(self, argv):
-        """Hook called to handle a command for which there is no handler.
-
-            "argv" is the command and arguments to run.
-        
-        The default implementation writes an error message to stderr
-        and returns an error exit status.
-
-        Returns a numeric command exit status.
-        """
-        errmsg = self._str(self.unknowncmd % (argv[0],))
-        if self.cmdlooping:
-            self.stderr.write(errmsg+"\n")
-        else:
-            self.stderr.write("%s: %s\nTry '%s help' for info.\n"
-                              % (self._name_str, errmsg, self._name_str))
-        self.stderr.flush()
-        return 1
-
-    def parseline(self, line):
-        # This is used by Cmd.complete (readline completer function) to
-        # massage the current line buffer before completion processing.
-        # We override to drop special '!' handling.
-        line = line.strip()
-        if not line:
-            return None, None, line
-        elif line[0] == '?':
-            line = 'help ' + line[1:]
-        i, n = 0, len(line)
-        while i < n and line[i] in self.identchars: i = i+1
-        cmd, arg = line[:i], line[i:].strip()
-        return cmd, arg, line
-
-    def helpdefault(self, cmd, known):
-        """Hook called to handle help on a command for which there is no
-        help handler.
-
-            "cmd" is the command name on which help was requested.
-            "known" is a boolean indicating if this command is known
-                (i.e. if there is a handler for it).
-        
-        Returns a return code.
-        """
-        if known:
-            msg = self._str(self.nohelp % (cmd,))
-            if self.cmdlooping:
-                self.stderr.write(msg + '\n')
-            else:
-                self.stderr.write("%s: %s\n" % (self.name, msg))
-        else:
-            msg = self.unknowncmd % (cmd,)
-            if self.cmdlooping:
-                self.stderr.write(msg + '\n')
-            else:
-                self.stderr.write("%s: %s\n"
-                                  "Try '%s help' for info.\n"
-                                  % (self.name, msg, self.name))
-        self.stderr.flush()
-        return 1
-
-    def do_help(self, argv):
-        """${cmd_name}: give detailed help on a specific sub-command
-
-        Usage:
-            ${name} help [COMMAND]
-        """
-        if len(argv) > 1: # asking for help on a particular command
-            doc = None
-            cmdname = self._get_canonical_cmd_name(argv[1]) or argv[1]
-            if not cmdname:
-                return self.helpdefault(argv[1], False)
-            else:
-                helpfunc = getattr(self, "help_"+cmdname, None)
-                if helpfunc:
-                    doc = helpfunc()
-                else:
-                    handler = self._get_cmd_handler(cmdname)
-                    if handler:
-                        doc = handler.__doc__
-                    if doc is None:
-                        return self.helpdefault(argv[1], handler != None)
-        else: # bare "help" command
-            doc = self.__class__.__doc__  # try class docstring
-            if doc is None:
-                # Try to provide some reasonable useful default help.
-                if self.cmdlooping: prefix = ""
-                else:               prefix = self.name+' '
-                doc = """Usage:
-                    %sCOMMAND [ARGS...]
-                    %shelp [COMMAND]
-
-                ${option_list}
-                ${command_list}
-                ${help_list}
-                """ % (prefix, prefix)
-            cmdname = None
-
-        if doc: # *do* have help content, massage and print that
-            doc = self._help_reindent(doc)
-            doc = self._help_preprocess(doc, cmdname)
-            doc = doc.rstrip() + '\n' # trim down trailing space
-            self.stdout.write(self._str(doc))
-            self.stdout.flush()
-    do_help.aliases = ["?"]
-
-    def _help_reindent(self, help, indent=None):
-        """Hook to re-indent help strings before writing to stdout.
-
-            "help" is the help content to re-indent
-            "indent" is a string with which to indent each line of the
-                help content after normalizing. If unspecified or None
-                then the default is use: the 'self.helpindent' class
-                attribute. By default this is the empty string, i.e.
-                no indentation.
-
-        By default, all common leading whitespace is removed and then
-        the lot is indented by 'self.helpindent'. When calculating the
-        common leading whitespace the first line is ignored -- hence
-        help content for Conan can be written as follows and have the
-        expected indentation:
-
-            def do_crush(self, ...):
-                '''${cmd_name}: crush your enemies, see them driven before you...
-
-                c.f. Conan the Barbarian'''
-        """
-        if indent is None:
-            indent = self.helpindent
-        lines = help.splitlines(0)
-        _dedentlines(lines, skip_first_line=True)
-        lines = [(indent+line).rstrip() for line in lines]
-        return '\n'.join(lines)
-
-    def _help_preprocess(self, help, cmdname):
-        """Hook to preprocess a help string before writing to stdout.
-
-            "help" is the help string to process.
-            "cmdname" is the canonical sub-command name for which help
-                is being given, or None if the help is not specific to a
-                command.
-
-        By default the following template variables are interpolated in
-        help content. (Note: these are similar to Python 2.4's
-        string.Template interpolation but not quite.)
-
-        ${name}
-            The tool's/shell's name, i.e. 'self.name'.
-        ${option_list}
-            A formatted table of options for this shell/tool.
-        ${command_list}
-            A formatted table of available sub-commands.
-        ${help_list}
-            A formatted table of additional help topics (i.e. 'help_*'
-            methods with no matching 'do_*' method).
-        ${cmd_name}
-            The name (and aliases) for this sub-command formatted as:
-            "NAME (ALIAS1, ALIAS2, ...)".
-        ${cmd_usage}
-            A formatted usage block inferred from the command function
-            signature.
-        ${cmd_option_list}
-            A formatted table of options for this sub-command. (This is
-            only available for commands using the optparse integration,
-            i.e.  using @cmdln.option decorators or manually setting the
-            'optparser' attribute on the 'do_*' method.)
-
-        Returns the processed help. 
-        """
-        preprocessors = {
-            "${name}":            self._help_preprocess_name,
-            "${option_list}":     self._help_preprocess_option_list,
-            "${command_list}":    self._help_preprocess_command_list,
-            "${help_list}":       self._help_preprocess_help_list,
-            "${cmd_name}":        self._help_preprocess_cmd_name,
-            "${cmd_usage}":       self._help_preprocess_cmd_usage,
-            "${cmd_option_list}": self._help_preprocess_cmd_option_list,
-        }
-
-        for marker, preprocessor in preprocessors.items():
-            if marker in help:
-                help = preprocessor(help, cmdname)
-        return help
-
-    def _help_preprocess_name(self, help, cmdname=None):
-        return help.replace("${name}", self.name)
-
-    def _help_preprocess_option_list(self, help, cmdname=None):
-        marker = "${option_list}"
-        indent, indent_width = _get_indent(marker, help)
-        suffix = _get_trailing_whitespace(marker, help)
-
-        if self.optparser:
-            # Setup formatting options and format.
-            # - Indentation of 4 is better than optparse default of 2.
-            #   C.f. Damian Conway's discussion of this in Perl Best
-            #   Practices.
-            self.optparser.formatter.indent_increment = 4
-            self.optparser.formatter.current_indent = indent_width
-            block = self.optparser.format_option_help() + '\n'
-        else:
-            block = ""
-            
-        help = help.replace(indent+marker+suffix, block, 1)
-        return help
-
-
-    def _help_preprocess_command_list(self, help, cmdname=None):
-        marker = "${command_list}"
-        indent, indent_width = _get_indent(marker, help)
-        suffix = _get_trailing_whitespace(marker, help)
-
-        # Find any aliases for commands.
-        token2canonical = self._get_canonical_map()
-        aliases = {}
-        for token, cmdname in token2canonical.items():
-            if token == cmdname: continue
-            aliases.setdefault(cmdname, []).append(token)
-
-        # Get the list of (non-hidden) commands and their
-        # documentation, if any.
-        cmdnames = {} # use a dict to strip duplicates
-        for attr in self.get_names():
-            if attr.startswith("do_"):
-                cmdnames[attr[3:]] = True
-        cmdnames = cmdnames.keys()
-        cmdnames.sort()
-        linedata = []
-        for cmdname in cmdnames:
-            if aliases.get(cmdname):
-                a = aliases[cmdname]
-                a.sort()
-                cmdstr = "%s (%s)" % (cmdname, ", ".join(a))
-            else:
-                cmdstr = cmdname
-            doc = None
-            try:
-                helpfunc = getattr(self, 'help_'+cmdname)
-            except AttributeError:
-                handler = self._get_cmd_handler(cmdname)
-                if handler:
-                    doc = handler.__doc__
-            else:
-                doc = helpfunc()
-                
-            # Strip "${cmd_name}: " from the start of a command's doc. Best
-            # practice dictates that command help strings begin with this, but
-            # it isn't at all wanted for the command list.
-            to_strip = "${cmd_name}:"
-            if doc and doc.startswith(to_strip):
-                #log.debug("stripping %r from start of %s's help string",
-                #          to_strip, cmdname)
-                doc = doc[len(to_strip):].lstrip()
-            linedata.append( (cmdstr, doc) )
-
-        if linedata:
-            subindent = indent + ' '*4
-            lines = _format_linedata(linedata, subindent, indent_width+4)
-            block = indent + "Commands:\n" \
-                    + '\n'.join(lines) + "\n\n"
-            help = help.replace(indent+marker+suffix, block, 1)
-        return help
-
-    def _gen_names_and_attrs(self):
-        # Inheritance says we have to look in class and
-        # base classes; order is not important.
-        names = []
-        classes = [self.__class__]
-        while classes:
-            aclass = classes.pop(0)
-            if aclass.__bases__:
-                classes = classes + list(aclass.__bases__)
-            for name in dir(aclass):
-                yield (name, getattr(aclass, name))
-
-    def _help_preprocess_help_list(self, help, cmdname=None):
-        marker = "${help_list}"
-        indent, indent_width = _get_indent(marker, help)
-        suffix = _get_trailing_whitespace(marker, help)
-
-        # Determine the additional help topics, if any.
-        helpnames = {}
-        token2cmdname = self._get_canonical_map()
-        for attrname, attr in self._gen_names_and_attrs():
-            if not attrname.startswith("help_"): continue
-            helpname = attrname[5:]
-            if helpname not in token2cmdname:
-                helpnames[helpname] = attr
-
-        if helpnames:
-            linedata = [(n, a.__doc__ or "") for n, a in helpnames.items()]
-            linedata.sort()
-
-            subindent = indent + ' '*4
-            lines = _format_linedata(linedata, subindent, indent_width+4)
-            block = (indent
-                    + "Additional help topics (run `%s help TOPIC'):\n" % self.name
-                    + '\n'.join(lines)
-                    + "\n\n")
-        else:
-            block = ''
-        help = help.replace(indent+marker+suffix, block, 1)
-        return help
-
-    def _help_preprocess_cmd_name(self, help, cmdname=None):
-        marker = "${cmd_name}"
-        handler = self._get_cmd_handler(cmdname)
-        if not handler:
-            raise CmdlnError("cannot preprocess '%s' into help string: "
-                             "could not find command handler for %r" 
-                             % (marker, cmdname))
-        s = cmdname
-        if hasattr(handler, "aliases"):
-            s += " (%s)" % (", ".join(handler.aliases))
-        help = help.replace(marker, s)
-        return help
-
-    #TODO: this only makes sense as part of the Cmdln class.
-    #      Add hooks to add help preprocessing template vars and put
-    #      this one on that class.
-    def _help_preprocess_cmd_usage(self, help, cmdname=None):
-        marker = "${cmd_usage}"
-        handler = self._get_cmd_handler(cmdname)
-        if not handler:
-            raise CmdlnError("cannot preprocess '%s' into help string: "
-                             "could not find command handler for %r" 
-                             % (marker, cmdname))
-        indent, indent_width = _get_indent(marker, help)
-        suffix = _get_trailing_whitespace(marker, help)
-
-        # Extract the introspection bits we need.
-        func = handler.im_func
-        if func.func_defaults:
-            func_defaults = list(func.func_defaults)
-        else:
-            func_defaults = []
-        co_argcount = func.func_code.co_argcount
-        co_varnames = func.func_code.co_varnames
-        co_flags = func.func_code.co_flags
-        CO_FLAGS_ARGS = 4
-        CO_FLAGS_KWARGS = 8
-
-        # Adjust argcount for possible *args and **kwargs arguments.
-        argcount = co_argcount
-        if co_flags & CO_FLAGS_ARGS:   argcount += 1
-        if co_flags & CO_FLAGS_KWARGS: argcount += 1
-
-        # Determine the usage string.
-        usage = "%s %s" % (self.name, cmdname)
-        if argcount <= 2:   # handler ::= do_FOO(self, argv)
-            usage += " [ARGS...]"
-        elif argcount >= 3: # handler ::= do_FOO(self, subcmd, opts, ...)
-            argnames = list(co_varnames[3:argcount])
-            tail = ""
-            if co_flags & CO_FLAGS_KWARGS:
-                name = argnames.pop(-1)
-                import warnings
-                # There is no generally accepted mechanism for passing
-                # keyword arguments from the command line. Could
-                # *perhaps* consider: arg=value arg2=value2 ...
-                warnings.warn("argument '**%s' on '%s.%s' command "
-                              "handler will never get values" 
-                              % (name, self.__class__.__name__,
-                                 func.func_name))
-            if co_flags & CO_FLAGS_ARGS:
-                name = argnames.pop(-1)
-                tail = "[%s...]" % name.upper()
-            while func_defaults:
-                func_defaults.pop(-1)
-                name = argnames.pop(-1)
-                tail = "[%s%s%s]" % (name.upper(), (tail and ' ' or ''), tail)
-            while argnames:
-                name = argnames.pop(-1)
-                tail = "%s %s" % (name.upper(), tail)
-            usage += ' ' + tail
-
-        block_lines = [
-            self.helpindent + "Usage:",
-            self.helpindent + ' '*4 + usage
-        ]
-        block = '\n'.join(block_lines) + '\n\n'
-
-        help = help.replace(indent+marker+suffix, block, 1)
-        return help
-
-    #TODO: this only makes sense as part of the Cmdln class.
-    #      Add hooks to add help preprocessing template vars and put
-    #      this one on that class.
-    def _help_preprocess_cmd_option_list(self, help, cmdname=None):
-        marker = "${cmd_option_list}"
-        handler = self._get_cmd_handler(cmdname)
-        if not handler:
-            raise CmdlnError("cannot preprocess '%s' into help string: "
-                             "could not find command handler for %r" 
-                             % (marker, cmdname))
-        indent, indent_width = _get_indent(marker, help)
-        suffix = _get_trailing_whitespace(marker, help)
-        if hasattr(handler, "optparser"):
-            # Setup formatting options and format.
-            # - Indentation of 4 is better than optparse default of 2.
-            #   C.f. Damian Conway's discussion of this in Perl Best
-            #   Practices.
-            handler.optparser.formatter.indent_increment = 4
-            handler.optparser.formatter.current_indent = indent_width
-            block = handler.optparser.format_option_help() + '\n'
-        else:
-            block = ""
-
-        help = help.replace(indent+marker+suffix, block, 1)
-        return help
-
-    def _get_canonical_cmd_name(self, token):
-        map = self._get_canonical_map()
-        return map.get(token, None)
-
-    def _get_canonical_map(self):
-        """Return a mapping of available command names and aliases to
-        their canonical command name.
-        """
-        cacheattr = "_token2canonical"
-        if not hasattr(self, cacheattr):
-            # Get the list of commands and their aliases, if any.
-            token2canonical = {}
-            cmd2funcname = {} # use a dict to strip duplicates
-            for attr in self.get_names():
-                if attr.startswith("do_"):    cmdname = attr[3:]
-                elif attr.startswith("_do_"): cmdname = attr[4:]
-                else:
-                    continue
-                cmd2funcname[cmdname] = attr
-                token2canonical[cmdname] = cmdname
-            for cmdname, funcname in cmd2funcname.items(): # add aliases
-                func = getattr(self, funcname)
-                aliases = getattr(func, "aliases", [])
-                for alias in aliases:
-                    if alias in cmd2funcname:
-                        import warnings
-                        warnings.warn("'%s' alias for '%s' command conflicts "
-                                      "with '%s' handler"
-                                      % (alias, cmdname, cmd2funcname[alias]))
-                        continue
-                    token2canonical[alias] = cmdname
-            setattr(self, cacheattr, token2canonical)
-        return getattr(self, cacheattr)
-
-    def _get_cmd_handler(self, cmdname):
-        handler = None
-        try:
-            handler = getattr(self, 'do_' + cmdname)
-        except AttributeError:
-            try:
-                # Private command handlers begin with "_do_".
-                handler = getattr(self, '_do_' + cmdname)
-            except AttributeError:
-                pass
-        return handler
-
-    def _do_EOF(self, argv):
-        # Default EOF handler
-        # Note: an actual EOF is redirected to this command.
-        #TODO: separate name for this. Currently it is available from
-        #      command-line. Is that okay?
-        self.stdout.write('\n')
-        self.stdout.flush()
-        self.stop = True
-
-    def emptyline(self):
-        # Different from cmd.Cmd: don't repeat the last command for an
-        # emptyline.
-        if self.cmdlooping:
-            pass
-        else:
-            return self.do_help(["help"])
-
-
-#---- optparse.py extension to fix (IMO) some deficiencies
-#
-# See the class _OptionParserEx docstring for details.
-#
-
-class StopOptionProcessing(Exception):
-    """Indicate that option *and argument* processing should stop
-    cleanly. This is not an error condition. It is similar in spirit to
-    StopIteration. This is raised by _OptionParserEx's default "help"
-    and "version" option actions and can be raised by custom option
-    callbacks too.
-    
-    Hence the typical CmdlnOptionParser (a subclass of _OptionParserEx)
-    usage is:
-
-        parser = CmdlnOptionParser(mycmd)
-        parser.add_option("-f", "--force", dest="force")
-        ...
-        try:
-            opts, args = parser.parse_args()
-        except StopOptionProcessing:
-            # normal termination, "--help" was probably given
-            sys.exit(0)
-    """
-
-class _OptionParserEx(optparse.OptionParser):
-    """An optparse.OptionParser that uses exceptions instead of sys.exit.
-
-    This class is an extension of optparse.OptionParser that differs
-    as follows:
-    - Correct (IMO) the default OptionParser error handling to never
-      sys.exit(). Instead OptParseError exceptions are passed through.
-    - Add the StopOptionProcessing exception (a la StopIteration) to
-      indicate normal termination of option processing.
-      See StopOptionProcessing's docstring for details.
-
-    I'd also like to see the following in the core optparse.py, perhaps
-    as a RawOptionParser which would serve as a base class for the more
-    generally used OptionParser (that works as current):
-    - Remove the implicit addition of the -h|--help and --version
-      options. They can get in the way (e.g. if you want '-?' and '-V' for
-      these as well) and it is not hard to do:
-        optparser.add_option("-h", "--help", action="help")
-        optparser.add_option("--version", action="version")
-      These are good practices, just not valid defaults if they can
-      get in the way.
-    """
-    def error(self, msg):
-        raise optparse.OptParseError(msg)
-
-    def exit(self, status=0, msg=None):
-        if status == 0:
-            raise StopOptionProcessing(msg)
-        else:
-            #TODO: don't lose status info here
-            raise optparse.OptParseError(msg)
-
-
-
-#---- optparse.py-based option processing support
-
-class CmdlnOptionParser(_OptionParserEx):
-    """An optparse.OptionParser class more appropriate for top-level
-    Cmdln options. For parsing of sub-command options, see
-    SubCmdOptionParser.
-
-    Changes:
-    - disable_interspersed_args() by default, because a Cmdln instance
-      has sub-commands which may themselves have options.
-    - Redirect print_help() to the Cmdln.do_help() which is better
-      equipped to handle the "help" action.
-    - error() will raise a CmdlnUserError: OptionParse.error() is meant
-      to be called for user errors. Raising a well-known error here can
-      make error handling clearer.
-    - Also see the changes in _OptionParserEx.
-    """
-    def __init__(self, cmdln, **kwargs):
-        self.cmdln = cmdln
-        kwargs["prog"] = self.cmdln.name
-        _OptionParserEx.__init__(self, **kwargs)
-        self.disable_interspersed_args()
-
-    def print_help(self, file=None):
-        self.cmdln.onecmd(["help"])
-
-    def error(self, msg):
-        raise CmdlnUserError(msg)
-
-
-class SubCmdOptionParser(_OptionParserEx):
-    def set_cmdln_info(self, cmdln, subcmd):
-        """Called by Cmdln to pass relevant info about itself needed
-        for print_help().
-        """
-        self.cmdln = cmdln
-        self.subcmd = subcmd
-
-    def print_help(self, file=None):
-        self.cmdln.onecmd(["help", self.subcmd])
-
-    def error(self, msg):
-        raise CmdlnUserError(msg)
-
-
-def option(*args, **kwargs):
-    """Decorator to add an option to the optparser argument of a Cmdln
-    subcommand.
-    
-    Example:
-        class MyShell(cmdln.Cmdln):
-            @cmdln.option("-f", "--force", help="force removal")
-            def do_remove(self, subcmd, opts, *args):
-                #...
-    """
-    #XXX Is there a possible optimization for many options to not have a
-    #    large stack depth here?
-    def decorate(f):
-        if not hasattr(f, "optparser"):
-            f.optparser = SubCmdOptionParser()
-        f.optparser.add_option(*args, **kwargs)
-        return f
-    return decorate
-
-
-class Cmdln(RawCmdln):
-    """An improved (on cmd.Cmd) framework for building multi-subcommand
-    scripts (think "svn" & "cvs") and simple shells (think "pdb" and
-    "gdb").
-
-    A simple example:
-
-        import cmdln
-
-        class MySVN(cmdln.Cmdln):
-            name = "svn"
-
-            @cmdln.aliases('stat', 'st')
-            @cmdln.option('-v', '--verbose', action='store_true',
-                          help='print verbose information')
-            def do_status(self, subcmd, opts, *paths):
-                print "handle 'svn status' command"
-
-            #...
-
-        if __name__ == "__main__":
-            shell = MySVN()
-            retval = shell.main()
-            sys.exit(retval)
-
-    'Cmdln' extends 'RawCmdln' by providing optparse option processing
-    integration.  See this class' _dispatch_cmd() docstring and
-    <http://trentm.com/projects/cmdln> for more information.
-    """
-    def _dispatch_cmd(self, handler, argv):
-        """Introspect sub-command handler signature to determine how to
-        dispatch the command. The raw handler provided by the base
-        'RawCmdln' class is still supported:
-
-            def do_foo(self, argv):
-                # 'argv' is the vector of command line args, argv[0] is
-                # the command name itself (i.e. "foo" or an alias)
-                pass
-
-        In addition, if the handler has more than 2 arguments option
-        processing is automatically done (using optparse):
-
-            @cmdln.option('-v', '--verbose', action='store_true')
-            def do_bar(self, subcmd, opts, *args):
-                # subcmd = <"bar" or an alias>
-                # opts = <an optparse.Values instance>
-                if opts.verbose:
-                    print "lots of debugging output..."
-                # args = <tuple of arguments>
-                for arg in args:
-                    bar(arg)
-
-        TODO: explain that "*args" can be other signatures as well.
-
-        The `cmdln.option` decorator corresponds to an `add_option()`
-        method call on an `optparse.OptionParser` instance.
-
-        You can declare a specific number of arguments:
-
-            @cmdln.option('-v', '--verbose', action='store_true')
-            def do_bar2(self, subcmd, opts, bar_one, bar_two):
-                #...
-
-        and an appropriate error message will be raised/printed if the
-        command is called with a different number of args.
-        """
-        co_argcount = handler.im_func.func_code.co_argcount
-        if co_argcount == 2:   # handler ::= do_foo(self, argv)
-            return handler(argv)
-        elif co_argcount >= 3: # handler ::= do_foo(self, subcmd, opts, ...)
-            try:
-                optparser = handler.optparser
-            except AttributeError:
-                optparser = handler.im_func.optparser = SubCmdOptionParser()
-            assert isinstance(optparser, SubCmdOptionParser)
-            optparser.set_cmdln_info(self, argv[0])
-            try:
-                opts, args = optparser.parse_args(argv[1:])
-            except StopOptionProcessing:
-                #TODO: this doesn't really fly for a replacement of
-                #      optparse.py behaviour, does it?
-                return 0 # Normal command termination
-
-            try:
-                return handler(argv[0], opts, *args)
-            except TypeError, ex:
-                # Some TypeError's are user errors:
-                #   do_foo() takes at least 4 arguments (3 given)
-                #   do_foo() takes at most 5 arguments (6 given)
-                #   do_foo() takes exactly 5 arguments (6 given)
-                # Raise CmdlnUserError for these with a suitably
-                # massaged error message.
-                import sys
-                tb = sys.exc_info()[2] # the traceback object
-                if tb.tb_next is not None:
-                    # If the traceback is more than one level deep, then the
-                    # TypeError did *not* happen on the "handler(...)" call
-                    # above. In that case we don't want to handle it specially
-                    # here: it would falsely mask deeper code errors.
-                    raise
-                msg = ex.args[0]
-                match = _INCORRECT_NUM_ARGS_RE.search(msg)
-                if match:
-                    msg = list(match.groups())
-                    msg[1] = int(msg[1]) - 3
-                    if msg[1] == 1:
-                        msg[2] = msg[2].replace("arguments", "argument")
-                    msg[3] = int(msg[3]) - 3
-                    msg = ''.join(map(str, msg))
-                    raise CmdlnUserError(msg)
-                else:
-                    raise
-        else:
-            raise CmdlnError("incorrect argcount for %s(): takes %d, must "
-                             "take 2 for 'argv' signature or 3+ for 'opts' "
-                             "signature" % (handler.__name__, co_argcount))
-        
-
-
-#---- internal support functions
-
-def _format_linedata(linedata, indent, indent_width):
-    """Format specific linedata into a pleasant layout.
-    
-        "linedata" is a list of 2-tuples of the form:
-            (<item-display-string>, <item-docstring>)
-        "indent" is a string to use for one level of indentation
-        "indent_width" is a number of columns by which the
-            formatted data will be indented when printed.
-
-    The <item-display-string> column is held to 13 or 16 columns.
-    """
-    lines = []
-    WIDTH = 78 - indent_width
-    SPACING = 2
-    NAME_WIDTH_LOWER_BOUND = 13
-    NAME_WIDTH_UPPER_BOUND = 16
-    NAME_WIDTH = max([len(s) for s,d in linedata])
-    if NAME_WIDTH < NAME_WIDTH_LOWER_BOUND:
-        NAME_WIDTH = NAME_WIDTH_LOWER_BOUND
-    else:
-        NAME_WIDTH = NAME_WIDTH_UPPER_BOUND
-
-    DOC_WIDTH = WIDTH - NAME_WIDTH - SPACING
-    for namestr, doc in linedata:
-        line = indent + namestr
-        if len(namestr) <= NAME_WIDTH:
-            line += ' ' * (NAME_WIDTH + SPACING - len(namestr))
-        else:
-            lines.append(line)
-            line = indent + ' ' * (NAME_WIDTH + SPACING)
-        line += _summarize_doc(doc, DOC_WIDTH)
-        lines.append(line.rstrip())
-    return lines
-
-def _summarize_doc(doc, length=60):
-    r"""Parse out a short one line summary from the given doclines.
-    
-        "doc" is the doc string to summarize.
-        "length" is the max length for the summary
-
-    >>> _summarize_doc("this function does this")
-    'this function does this'
-    >>> _summarize_doc("this function does this", 10)
-    'this fu...'
-    >>> _summarize_doc("this function does this\nand that")
-    'this function does this and that'
-    >>> _summarize_doc("this function does this\n\nand that")
-    'this function does this'
-    """
-    import re
-    if doc is None:
-        return ""
-    assert length > 3, "length <= 3 is absurdly short for a doc summary"
-    doclines = doc.strip().splitlines(0)
-    if not doclines:
-        return ""
-
-    summlines = []
-    for i, line in enumerate(doclines):
-        stripped = line.strip()
-        if not stripped:
-            break
-        summlines.append(stripped)
-        if len(''.join(summlines)) >= length:
-            break
-
-    summary = ' '.join(summlines)
-    if len(summary) > length:
-        summary = summary[:length-3] + "..." 
-    return summary
-
-
-def line2argv(line):
-    r"""Parse the given line into an argument vector.
-    
-        "line" is the line of input to parse.
-
-    This may get niggly when dealing with quoting and escaping. The
-    current state of this parsing may not be completely thorough/correct
-    in this respect.
-    
-    >>> from cmdln import line2argv
-    >>> line2argv("foo")
-    ['foo']
-    >>> line2argv("foo bar")
-    ['foo', 'bar']
-    >>> line2argv("foo bar ")
-    ['foo', 'bar']
-    >>> line2argv(" foo bar")
-    ['foo', 'bar']
-
-    Quote handling:
-    
-    >>> line2argv("'foo bar'")
-    ['foo bar']
-    >>> line2argv('"foo bar"')
-    ['foo bar']
-    >>> line2argv(r'"foo\"bar"')
-    ['foo"bar']
-    >>> line2argv("'foo bar' spam")
-    ['foo bar', 'spam']
-    >>> line2argv("'foo 'bar spam")
-    ['foo bar', 'spam']
-    
-    >>> line2argv('some\tsimple\ttests')
-    ['some', 'simple', 'tests']
-    >>> line2argv('a "more complex" test')
-    ['a', 'more complex', 'test']
-    >>> line2argv('a more="complex test of " quotes')
-    ['a', 'more=complex test of ', 'quotes']
-    >>> line2argv('a more" complex test of " quotes')
-    ['a', 'more complex test of ', 'quotes']
-    >>> line2argv('an "embedded \\"quote\\""')
-    ['an', 'embedded "quote"']
-
-    # Komodo bug 48027
-    >>> line2argv('foo bar C:\\')
-    ['foo', 'bar', 'C:\\']
-
-    # Komodo change 127581
-    >>> line2argv(r'"\test\slash" "foo bar" "foo\"bar"')
-    ['\\test\\slash', 'foo bar', 'foo"bar']
-
-    # Komodo change 127629
-    >>> if sys.platform == "win32":
-    ...     line2argv(r'\foo\bar') == ['\\foo\\bar']
-    ...     line2argv(r'\\foo\\bar') == ['\\\\foo\\\\bar']
-    ...     line2argv('"foo') == ['foo']
-    ... else:
-    ...     line2argv(r'\foo\bar') == ['foobar']
-    ...     line2argv(r'\\foo\\bar') == ['\\foo\\bar']
-    ...     try:
-    ...         line2argv('"foo')
-    ...     except ValueError, ex:
-    ...         "not terminated" in str(ex)
-    True
-    True
-    True
-    """
-    import string
-    line = line.strip()
-    argv = []
-    state = "default"
-    arg = None  # the current argument being parsed
-    i = -1
-    while 1:
-        i += 1
-        if i >= len(line): break
-        ch = line[i]
-
-        if ch == "\\" and i+1 < len(line):
-            # escaped char always added to arg, regardless of state
-            if arg is None: arg = ""
-            if (sys.platform == "win32"
-                or state in ("double-quoted", "single-quoted")
-               ) and line[i+1] not in tuple('"\''):
-                arg += ch
-            i += 1
-            arg += line[i]
-            continue
-
-        if state == "single-quoted":
-            if ch == "'":
-                state = "default"
-            else:
-                arg += ch
-        elif state == "double-quoted":
-            if ch == '"':
-                state = "default"
-            else:
-                arg += ch
-        elif state == "default":
-            if ch == '"':
-                if arg is None: arg = ""
-                state = "double-quoted"
-            elif ch == "'":
-                if arg is None: arg = ""
-                state = "single-quoted"
-            elif ch in string.whitespace:
-                if arg is not None:
-                    argv.append(arg)
-                arg = None
-            else:
-                if arg is None: arg = ""
-                arg += ch
-    if arg is not None:
-        argv.append(arg)
-    if not sys.platform == "win32" and state != "default":
-        raise ValueError("command line is not terminated: unfinished %s "
-                         "segment" % state)
-    return argv
-
-
-def argv2line(argv):
-    r"""Put together the given argument vector into a command line.
-    
-        "argv" is the argument vector to process.
-    
-    >>> from cmdln import argv2line
-    >>> argv2line(['foo'])
-    'foo'
-    >>> argv2line(['foo', 'bar'])
-    'foo bar'
-    >>> argv2line(['foo', 'bar baz'])
-    'foo "bar baz"'
-    >>> argv2line(['foo"bar'])
-    'foo"bar'
-    >>> print argv2line(['foo" bar'])
-    'foo" bar'
-    >>> print argv2line(["foo' bar"])
-    "foo' bar"
-    >>> argv2line(["foo'bar"])
-    "foo'bar"
-    """
-    escapedArgs = []
-    for arg in argv:
-        if ' ' in arg and '"' not in arg:
-            arg = '"'+arg+'"'
-        elif ' ' in arg and "'" not in arg:
-            arg = "'"+arg+"'"
-        elif ' ' in arg:
-            arg = arg.replace('"', r'\"')
-            arg = '"'+arg+'"'
-        escapedArgs.append(arg)
-    return ' '.join(escapedArgs)
-
-
-# Recipe: dedent (0.1) in /Users/trentm/tm/recipes/cookbook
-def _dedentlines(lines, tabsize=8, skip_first_line=False):
-    """_dedentlines(lines, tabsize=8, skip_first_line=False) -> dedented lines
-    
-        "lines" is a list of lines to dedent.
-        "tabsize" is the tab width to use for indent width calculations.
-        "skip_first_line" is a boolean indicating if the first line should
-            be skipped for calculating the indent width and for dedenting.
-            This is sometimes useful for docstrings and similar.
-    
-    Same as dedent() except operates on a sequence of lines. Note: the
-    lines list is modified **in-place**.
-    """
-    DEBUG = False
-    if DEBUG: 
-        print "dedent: dedent(..., tabsize=%d, skip_first_line=%r)"\
-              % (tabsize, skip_first_line)
-    indents = []
-    margin = None
-    for i, line in enumerate(lines):
-        if i == 0 and skip_first_line: continue
-        indent = 0
-        for ch in line:
-            if ch == ' ':
-                indent += 1
-            elif ch == '\t':
-                indent += tabsize - (indent % tabsize)
-            elif ch in '\r\n':
-                continue # skip all-whitespace lines
-            else:
-                break
-        else:
-            continue # skip all-whitespace lines
-        if DEBUG: print "dedent: indent=%d: %r" % (indent, line)
-        if margin is None:
-            margin = indent
-        else:
-            margin = min(margin, indent)
-    if DEBUG: print "dedent: margin=%r" % margin
-
-    if margin is not None and margin > 0:
-        for i, line in enumerate(lines):
-            if i == 0 and skip_first_line: continue
-            removed = 0
-            for j, ch in enumerate(line):
-                if ch == ' ':
-                    removed += 1
-                elif ch == '\t':
-                    removed += tabsize - (removed % tabsize)
-                elif ch in '\r\n':
-                    if DEBUG: print "dedent: %r: EOL -> strip up to EOL" % line
-                    lines[i] = lines[i][j:]
-                    break
-                else:
-                    raise ValueError("unexpected non-whitespace char %r in "
-                                     "line %r while removing %d-space margin"
-                                     % (ch, line, margin))
-                if DEBUG:
-                    print "dedent: %r: %r -> removed %d/%d"\
-                          % (line, ch, removed, margin)
-                if removed == margin:
-                    lines[i] = lines[i][j+1:]
-                    break
-                elif removed > margin:
-                    lines[i] = ' '*(removed-margin) + lines[i][j+1:]
-                    break
-    return lines
-
-def _dedent(text, tabsize=8, skip_first_line=False):
-    """_dedent(text, tabsize=8, skip_first_line=False) -> dedented text
-
-        "text" is the text to dedent.
-        "tabsize" is the tab width to use for indent width calculations.
-        "skip_first_line" is a boolean indicating if the first line should
-            be skipped for calculating the indent width and for dedenting.
-            This is sometimes useful for docstrings and similar.
-    
-    textwrap.dedent(s), but don't expand tabs to spaces
-    """
-    lines = text.splitlines(1)
-    _dedentlines(lines, tabsize=tabsize, skip_first_line=skip_first_line)
-    return ''.join(lines)
-
-
-def _get_indent(marker, s, tab_width=8):
-    """_get_indent(marker, s, tab_width=8) ->
-        (<indentation-of-'marker'>, <indentation-width>)"""
-    # Figure out how much the marker is indented.
-    INDENT_CHARS = tuple(' \t')
-    start = s.index(marker)
-    i = start
-    while i > 0:
-        if s[i-1] not in INDENT_CHARS:
-            break
-        i -= 1
-    indent = s[i:start]
-    indent_width = 0
-    for ch in indent:
-        if ch == ' ':
-            indent_width += 1
-        elif ch == '\t':
-            indent_width += tab_width - (indent_width % tab_width)
-    return indent, indent_width
-
-def _get_trailing_whitespace(marker, s):
-    """Return the whitespace content trailing the given 'marker' in string 's',
-    up to and including a newline.
-    """
-    suffix = ''
-    start = s.index(marker) + len(marker)
-    i = start
-    while i < len(s):
-        if s[i] in ' \t':
-            suffix += s[i]
-        elif s[i] in '\r\n':
-            suffix += s[i]
-            if s[i] == '\r' and i+1 < len(s) and s[i+1] == '\n':
-                suffix += s[i+1]
-            break
-        else:
-            break
-        i += 1
-    return suffix
-
-
-
-#---- bash completion support
-# Note: This is still experimental. I expect to change this
-# significantly.
-#
-# To get Bash completion for a cmdln.Cmdln class, run the following
-# bash command:
-#   $ complete -C 'python -m cmdln /path/to/script.py CmdlnClass' cmdname
-# For example:
-#   $ complete -C 'python -m cmdln ~/bin/svn.py SVN' svn
-#
-#TODO: Simplify the above so you don't have to give the path to the script (try to
-#      find it on PATH, if possible). Could also make class name
-#      optional if there is only one in the module (common case).
-
-if __name__ == "__main__" and len(sys.argv) == 6:
-    def _log(s):
-        return # no-op, comment out for debugging
-        from os.path import expanduser
-        fout = open(expanduser("~/tmp/bashcpln.log"), 'a')
-        fout.write(str(s) + '\n')
-        fout.close()
-
-    # Recipe: module_from_path (1.0.1+)
-    def _module_from_path(path):
-        import imp, os, sys
-        path = os.path.expanduser(path)
-        dir = os.path.dirname(path) or os.curdir
-        name = os.path.splitext(os.path.basename(path))[0]
-        sys.path.insert(0, dir)
-        try:
-            iinfo = imp.find_module(name, [dir])
-            return imp.load_module(name, *iinfo)
-        finally:
-            sys.path.remove(dir)
-
-    def _get_bash_cplns(script_path, class_name, cmd_name,
-                        token, preceding_token):
-        _log('--')
-        _log('get_cplns(%r, %r, %r, %r, %r)'
-             % (script_path, class_name, cmd_name, token, preceding_token))
-        comp_line = os.environ["COMP_LINE"]
-        comp_point = int(os.environ["COMP_POINT"])
-        _log("COMP_LINE: %r" % comp_line)
-        _log("COMP_POINT: %r" % comp_point)
-
-        try:
-            script = _module_from_path(script_path)
-        except ImportError, ex:
-            _log("error importing `%s': %s" % (script_path, ex))
-            return []
-        shell = getattr(script, class_name)()
-        cmd_map = shell._get_canonical_map()
-        del cmd_map["EOF"]
-
-        # Determine if completing the sub-command name.
-        parts = comp_line[:comp_point].split(None, 1)
-        _log(parts)
-        if len(parts) == 1 or not (' ' in parts[1] or '\t' in parts[1]):
-            #TODO: if parts[1].startswith('-'): handle top-level opts
-            _log("complete sub-command names")
-            matches = {}
-            for name, canon_name in cmd_map.items():
-                if name.startswith(token):
-                    matches[name] = canon_name
-            if not matches:
-                return []
-            elif len(matches) == 1:
-                return matches.keys()
-            elif len(set(matches.values())) == 1:
-                return [matches.values()[0]]
-            else:
-                return matches.keys()
-
-        # Otherwise, complete options for the given sub-command.
-        #TODO: refine this so it does the right thing with option args
-        if token.startswith('-'):
-            cmd_name = comp_line.split(None, 2)[1]
-            try:
-                cmd_canon_name = cmd_map[cmd_name]
-            except KeyError:
-                return []
-            handler = shell._get_cmd_handler(cmd_canon_name)
-            optparser = getattr(handler, "optparser", None)
-            if optparser is None:
-                optparser = SubCmdOptionParser()
-            opt_strs = []
-            for option in optparser.option_list:
-                for opt_str in option._short_opts + option._long_opts:
-                    if opt_str.startswith(token):
-                        opt_strs.append(opt_str)
-            return opt_strs
-
-        return []
-
-    for cpln in _get_bash_cplns(*sys.argv[1:]):
-        print cpln
-
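
For context on what is being dropped here: the deleted cmdln.py implemented an optparse-based subcommand framework in which each subcommand is a do_* method decorated with its options. A minimal sketch of that usage pattern, reconstructed from the docstrings above (the MyTool name and the do_status handler are illustrative only, not part of yt):

    import sys
    import cmdln   # the module being removed in this diff

    class MyTool(cmdln.Cmdln):
        name = "mytool"

        @cmdln.option("-v", "--verbose", action="store_true",
                      help="print verbose information")
        def do_status(self, subcmd, opts, *paths):
            # subcmd is the command name (or alias) as typed;
            # opts is the optparse.Values produced by the decorated options.
            if opts.verbose:
                print "being verbose"
            print "status of", paths

    if __name__ == "__main__":
        sys.exit(MyTool().main())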


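The next diff rebuilds yt's command line on argparse instead: each YTCommand subclass registers itself as an argparse subparser through its metaclass and binds its run classmethod as the subcommand callback. A stripped-down, self-contained sketch of that registration and dispatch pattern follows; the YTCommandSketch and HelloCmd names, and the locally created parser/subparsers pair, are assumptions for illustration (in yt the parser and subparsers come from yt.startup_tasks):

    import argparse

    # Stand-ins for the objects yt imports from yt.startup_tasks.
    parser = argparse.ArgumentParser(prog="yt")
    subparsers = parser.add_subparsers()

    class YTCommandSketch(object):
        name = None
        description = ""
        args = ()

        class __metaclass__(type):
            def __init__(cls, name, bases, d):
                type.__init__(cls, name, bases, d)
                # Every concrete subclass registers itself as a subcommand
                # at class-definition time.
                if cls.name is not None:
                    sc = subparsers.add_parser(cls.name,
                                               description=cls.description)
                    sc.set_defaults(func=cls.run)
                    for flag, kwargs in cls.args:
                        sc.add_argument(flag, **kwargs)

        @classmethod
        def run(cls, args):
            # Instantiate and invoke; yt's version also loops over datasets.
            cls()(args)

    class HelloCmd(YTCommandSketch):
        name = "hello"
        description = "Say hello"
        args = (("--name", dict(default="world")),)

        def __call__(self, args):
            print "hello, %s" % args.name

    # Dispatch: parse argv, then call the bound subcommand function.
    ns = parser.parse_args(["hello", "--name", "yt"])
    ns.func(ns)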
diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -23,10 +23,12 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+from yt.config import ytcfg
+ytcfg["yt","__command_line"] = "True"
+from yt.startup_tasks import parser, subparsers
 from yt.mods import *
 from yt.funcs import *
-import cmdln as cmdln
-import optparse, os, os.path, math, sys, time, subprocess, getpass, tempfile
+import argparse, os, os.path, math, sys, time, subprocess, getpass, tempfile
 import urllib, urllib2, base64
 
 def _fix_pf(arg):
@@ -40,13 +42,68 @@
         pf = load(arg[:-10])
     else:
         pf = load(arg)
-    if pf is None:
-        raise IOError
     return pf
 
+def _add_arg(sc, arg):
+    if isinstance(arg, types.StringTypes):
+        arg = _common_options[arg].copy()
+    argnames = []
+    if "short" in arg: argnames.append(arg.pop('short'))
+    if "long" in arg: argnames.append(arg.pop('long'))
+    sc.add_argument(*argnames, **arg)
+
+class YTCommand(object):
+    args = ()
+    name = None
+    description = ""
+    aliases = ()
+    npfs = 1
+
+    class __metaclass__(type):
+        def __init__(cls, name, b, d):
+            type.__init__(cls, name, b, d)
+            if cls.name is not None:
+                sc = subparsers.add_parser(cls.name,
+                    description = cls.description,
+                    help = cls.description)
+                sc.set_defaults(func=cls.run)
+                for arg in cls.args:
+                    _add_arg(sc, arg)
+
+    @classmethod
+    def run(cls, args):
+        self = cls()
+        # Some commands need to be run repeatedly on parameter files
+        # In fact, this is the rule and the opposite is the exception
+        # BUT, we only want to parse the arguments once.
+        if cls.npfs > 1:
+            self(args)
+        else:
+            if len(getattr(args, "pf", [])) > 1:
+                pfs = args.pf
+                for pf in pfs:
+                    args.pf = pf
+                    self(args)
+            else:
+                args.pf = getattr(args, 'pf', [None])[0]
+                self(args)
+
+class GetParameterFiles(argparse.Action):
+    def __call__(self, parser, namespace, values, option_string = None):
+        if len(values) == 1:
+            pfs = values
+        elif len(values) == 2 and namespace.basename is not None:
+            pfs = ["%s%04i" % (namespace.basename, r)
+                   for r in range(int(values[0]), int(values[1]), namespace.skip) ]
+        else:
+            pfs = values
+        namespace.pf = [_fix_pf(pf) for pf in pfs]
+
 _common_options = dict(
+    pf      = dict(short="pf", action=GetParameterFiles,
+                   nargs="+", help="Parameter files to run on"),
     axis    = dict(short="-a", long="--axis",
-                   action="store", type="int",
+                   action="store", type=int,
                    dest="axis", default=4,
                    help="Axis (4 for all three)"),
     log     = dict(short="-l", long="--log",
@@ -54,208 +111,173 @@
                    dest="takelog", default=True,
                    help="Take the log of the field?"),
     text    = dict(short="-t", long="--text",
-                   action="store", type="string",
+                   action="store", type=str,
                    dest="text", default=None,
                    help="Textual annotation"),
     field   = dict(short="-f", long="--field",
-                   action="store", type="string",
+                   action="store", type=str,
                    dest="field", default="Density",
                    help="Field to color by"),
     weight  = dict(short="-g", long="--weight",
-                   action="store", type="string",
+                   action="store", type=str,
                    dest="weight", default=None,
                    help="Field to weight projections with"),
-    cmap    = dict(short="", long="--colormap",
-                   action="store", type="string",
+    cmap    = dict(long="--colormap",
+                   action="store", type=str,
                    dest="cmap", default="jet",
                    help="Colormap name"),
     zlim    = dict(short="-z", long="--zlim",
-                   action="store", type="float",
+                   action="store", type=float,
                    dest="zlim", default=None,
                    nargs=2,
                    help="Color limits (min, max)"),
-    dex     = dict(short="", long="--dex",
-                   action="store", type="float",
+    dex     = dict(long="--dex",
+                   action="store", type=float,
                    dest="dex", default=None,
                    nargs=1,
                    help="Number of dex above min to display"),
     width   = dict(short="-w", long="--width",
-                   action="store", type="float",
+                   action="store", type=float,
                    dest="width", default=1.0,
                    help="Width in specified units"),
     unit    = dict(short="-u", long="--unit",
-                   action="store", type="string",
+                   action="store", type=str,
                    dest="unit", default='unitary',
                    help="Desired units"),
     center  = dict(short="-c", long="--center",
-                   action="store", type="float",
+                   action="store", type=float,
                    dest="center", default=None,
                    nargs=3,
                    help="Center, space separated (-1 -1 -1 for max)"),
     bn      = dict(short="-b", long="--basename",
-                   action="store", type="string",
+                   action="store", type=str,
                    dest="basename", default=None,
                    help="Basename of parameter files"),
     output  = dict(short="-o", long="--output",
-                   action="store", type="string",
+                   action="store", type=str,
                    dest="output", default="frames/",
                    help="Folder in which to place output images"),
     outputfn= dict(short="-o", long="--output",
-                   action="store", type="string",
+                   action="store", type=str,
                    dest="output", default=None,
                    help="File in which to place output"),
     skip    = dict(short="-s", long="--skip",
-                   action="store", type="int",
+                   action="store", type=int,
                    dest="skip", default=1,
                    help="Skip factor for outputs"),
     proj    = dict(short="-p", long="--projection",
                    action="store_true", 
                    dest="projection", default=False,
                    help="Use a projection rather than a slice"),
-    maxw    = dict(short="", long="--max-width",
-                   action="store", type="float",
+    maxw    = dict(long="--max-width",
+                   action="store", type=float,
                    dest="max_width", default=1.0,
                    help="Maximum width in code units"),
-    minw    = dict(short="", long="--min-width",
-                   action="store", type="float",
+    minw    = dict(long="--min-width",
+                   action="store", type=float,
                    dest="min_width", default=50,
                    help="Minimum width in units of smallest dx (default: 50)"),
     nframes = dict(short="-n", long="--nframes",
-                   action="store", type="int",
+                   action="store", type=int,
                    dest="nframes", default=100,
                    help="Number of frames to generate"),
-    slabw   = dict(short="", long="--slab-width",
-                   action="store", type="float",
+    slabw   = dict(long="--slab-width",
+                   action="store", type=float,
                    dest="slab_width", default=1.0,
                    help="Slab width in specified units"),
     slabu   = dict(short="-g", long="--slab-unit",
-                   action="store", type="string",
+                   action="store", type=str,
                    dest="slab_unit", default='1',
                    help="Desired units for the slab"),
-    ptype   = dict(short="", long="--particle-type",
-                   action="store", type="int",
+    ptype   = dict(long="--particle-type",
+                   action="store", type=int,
                    dest="ptype", default=2,
                    help="Particle type to select"),
-    agecut  = dict(short="", long="--age-cut",
-                   action="store", type="float",
+    agecut  = dict(long="--age-cut",
+                   action="store", type=float,
                    dest="age_filter", default=None,
                    nargs=2,
                    help="Bounds for the field to select"),
-    uboxes  = dict(short="", long="--unit-boxes",
+    uboxes  = dict(long="--unit-boxes",
                    action="store_true",
                    dest="unit_boxes",
                    help="Display helpful unit boxes"),
-    thresh  = dict(short="", long="--threshold",
-                   action="store", type="float",
+    thresh  = dict(long="--threshold",
+                   action="store", type=float,
                    dest="threshold", default=None,
                    help="Density threshold"),
-    dm_only = dict(short="", long="--all-particles",
+    dm_only = dict(long="--all-particles",
                    action="store_false", 
                    dest="dm_only", default=True,
                    help="Use all particles"),
-    grids   = dict(short="", long="--show-grids",
+    grids   = dict(long="--show-grids",
                    action="store_true",
                    dest="grids", default=False,
                    help="Show the grid boundaries"),
-    time    = dict(short="", long="--time",
+    time    = dict(long="--time",
                    action="store_true",
                    dest="time", default=False,
                    help="Print time in years on image"),
-    contours    = dict(short="", long="--contours",
-                   action="store",type="int",
+    contours    = dict(long="--contours",
+                   action="store",type=int,
                    dest="contours", default=None,
                    help="Number of Contours for Rendering"),
-    contour_width  = dict(short="", long="--contour_width",
-                   action="store",type="float",
+    contour_width  = dict(long="--contour_width",
+                   action="store",type=float,
                    dest="contour_width", default=None,
                    help="Width of gaussians used for rendering."),
-    enhance   = dict(short="", long="--enhance",
+    enhance   = dict(long="--enhance",
                    action="store_true",
                    dest="enhance", default=False,
                    help="Enhance!"),
     valrange  = dict(short="-r", long="--range",
-                   action="store", type="float",
+                   action="store", type=float,
                    dest="valrange", default=None,
                    nargs=2,
                    help="Range, space separated"),
-    up  = dict(short="", long="--up",
-                   action="store", type="float",
+    up  = dict(long="--up",
+                   action="store", type=float,
                    dest="up", default=None,
                    nargs=3,
                    help="Up, space separated"),
-    viewpoint  = dict(short="", long="--viewpoint",
-                   action="store", type="float",
+    viewpoint  = dict(long="--viewpoint",
+                   action="store", type=float,
                    dest="viewpoint", default=[1., 1., 1.],
                    nargs=3,
                    help="Viewpoint, space separated"),
-    pixels    = dict(short="", long="--pixels",
-                   action="store",type="int",
+    pixels    = dict(long="--pixels",
+                   action="store",type=int,
                    dest="pixels", default=None,
                    help="Number of Pixels for Rendering"),
-    halos   = dict(short="", long="--halos",
-                   action="store", type="string",
+    halos   = dict(long="--halos",
+                   action="store", type=str,
                    dest="halos",default="multiple",
                    help="Run halo profiler on a 'single' halo or 'multiple' halos."),
-    halo_radius = dict(short="", long="--halo_radius",
-                       action="store", type="float",
+    halo_radius = dict(long="--halo_radius",
+                       action="store", type=float,
                        dest="halo_radius",default=0.1,
                        help="Constant radius for profiling halos if using hop output files with no radius entry. Default: 0.1."),
-    halo_radius_units = dict(short="", long="--halo_radius_units",
-                             action="store", type="string",
+    halo_radius_units = dict(long="--halo_radius_units",
+                             action="store", type=str,
                              dest="halo_radius_units",default="1",
                              help="Units for radius used with --halo_radius flag. Default: '1' (code units)."),
-    halo_hop_style = dict(short="", long="--halo_hop_style",
-                          action="store", type="string",
+    halo_hop_style = dict(long="--halo_hop_style",
+                          action="store", type=str,
                           dest="halo_hop_style",default="new",
                           help="Style of hop output file.  'new' for yt_hop files and 'old' for enzo_hop files."),
-    halo_parameter_file = dict(short="", long="--halo_parameter_file",
-                               action="store", type="string",
+    halo_parameter_file = dict(long="--halo_parameter_file",
+                               action="store", type=str,
                                dest="halo_parameter_file",default=None,
                                help="HaloProfiler parameter file."),
-    make_profiles = dict(short="", long="--make_profiles",
+    make_profiles = dict(long="--make_profiles",
                          action="store_true", default=False,
                          help="Make profiles with halo profiler."),
-    make_projections = dict(short="", long="--make_projections",
+    make_projections = dict(long="--make_projections",
                             action="store_true", default=False,
                             help="Make projections with halo profiler.")
 
     )
 
-def _add_options(parser, *options):
-    for opt in options:
-        oo = _common_options[opt].copy()
-        parser.add_option(oo.pop("short"), oo.pop("long"), **oo)
-
-def _get_parser(*options):
-    parser = optparse.OptionParser()
-    _add_options(parser, *options)
-    return parser
-
-def add_cmd_options(options):
-    opts = []
-    for option in options:
-        vals = _common_options[option].copy()
-        opts.append(([vals.pop("short"), vals.pop("long")],
-                      vals))
-    def apply_options(func):
-        for args, kwargs in opts:
-            func = cmdln.option(*args, **kwargs)(func)
-        return func
-    return apply_options
-
-def check_args(func):
-    @wraps(func)
-    def arg_iterate(self, subcmd, opts, *args):
-        if len(args) == 1:
-            pfs = args
-        elif len(args) == 2 and opts.basename is not None:
-            pfs = ["%s%04i" % (opts.basename, r)
-                   for r in range(int(args[0]), int(args[1]), opts.skip) ]
-        else: pfs = args
-        for arg in pfs:
-            func(self, subcmd, opts, arg)
-    return arg_iterate
-
 def _update_hg(path, skip_rebuild = False):
     from mercurial import hg, ui, commands
     f = open(os.path.join(path, "yt_updater.log"), "a")
@@ -355,20 +377,16 @@
     # Now we think we have our supplemental repository.
     return supp_path
 
-class YTCommands(cmdln.Cmdln):
-    name="yt"
 
-    def __init__(self, *args, **kwargs):
-        cmdln.Cmdln.__init__(self, *args, **kwargs)
-        cmdln.Cmdln.do_help.aliases.append("h")
-
-    def do_update(self, subcmd, opts):
+class YTUpdateCmd(YTCommand):
+    name = "update"
+    description = \
         """
         Update the yt installation to the most recent version
 
-        ${cmd_usage}
-        ${cmd_option_list}
         """
+
+    def __call__(self, opts):
         import pkg_resources
         yt_provider = pkg_resources.get_provider("yt")
         path = os.path.dirname(yt_provider.module_path)
@@ -385,7 +403,7 @@
                 update_supp = True
         vstring = None
         if "site-packages" not in path:
-            vstring = _get_hg_version(path)
+            vstring = get_hg_version(path)
             print
             print "The current version of the code is:"
             print
@@ -394,7 +412,7 @@
             print "---"
             print
             print "This installation CAN be automatically updated."
-            _update_hg(path)
+            update_hg(path)
             print "Updated successfully."
         else:
             print
@@ -404,19 +422,24 @@
             print "updating to the newest changeset."
             print
 
-    @cmdln.option("-u", "--update-source", action="store_true",
-                  default = False,
-                  help="Update the yt installation, if able")
-    @cmdln.option("-o", "--output-version", action="store",
+class YTInstInfoCmd(YTCommand):
+    name = "instinfo"
+    args = (
+            dict(short="-u", long="--update-source", action="store_true",
+                 default = False,
+                 help="Update the yt installation, if able"),
+            dict(short="-o", long="--output-version", action="store",
                   default = None, dest="outputfile",
-                  help="File into which the current revision number will be stored")
-    def do_instinfo(self, subcmd, opts):
+                  help="File into which the current revision number will be" +
+                       "stored")
+           )
+    description = \
         """
         Get some information about the yt installation
 
-        ${cmd_usage}
-        ${cmd_option_list}
         """
+
+    def __call__(self, opts):
         import pkg_resources
         yt_provider = pkg_resources.get_provider("yt")
         path = os.path.dirname(yt_provider.module_path)
@@ -433,7 +456,7 @@
                 update_supp = True
         vstring = None
         if "site-packages" not in path:
-            vstring = _get_hg_version(path)
+            vstring = get_hg_version(path)
             print
             print "The current version of the code is:"
             print
@@ -443,7 +466,7 @@
             print
             print "This installation CAN be automatically updated."
             if opts.update_source:  
-                _update_hg(path)
+                update_hg(path)
             print "Updated successfully."
         elif opts.update_source:
             print
@@ -455,15 +478,18 @@
         if vstring is not None and opts.outputfile is not None:
             open(opts.outputfile, "w").write(vstring)
 
-    def do_load(self, subcmd, opts, arg):
+class YTLoadCmd(YTCommand):
+    name = "load"
+    description = \
         """
         Load a single dataset into an IPython instance
 
-        ${cmd_option_list}
         """
-        try:
-            pf = _fix_pf(arg)
-        except IOError:
+
+    args = ("pf", )
+
+    def __call__(self, args):
+        if args.pf is None:
             print "Could not load file."
             sys.exit()
         import yt.mods
@@ -475,7 +501,7 @@
             api_version = '0.11'
 
         local_ns = yt.mods.__dict__.copy()
-        local_ns['pf'] = pf
+        local_ns['pf'] = args.pf
 
         if api_version == '0.10':
             shell = IPython.Shell.IPShellEmbed()
@@ -491,159 +517,177 @@
             from IPython.frontend.terminal.embed import InteractiveShellEmbed
             ipshell = InteractiveShellEmbed(config=cfg)
 
-    @add_cmd_options(['outputfn','bn','thresh','dm_only','skip'])
-    @check_args
-    def do_hop(self, subcmd, opts, arg):
+class YTHopCmd(YTCommand):
+    args = ('outputfn','bn','thresh','dm_only','skip', 'pf')
+    name = "hop"
+    description = \
         """
         Run HOP on one or more datasets
 
-        ${cmd_option_list}
         """
-        pf = _fix_pf(arg)
-        kwargs = {'dm_only' : opts.dm_only}
-        if opts.threshold is not None: kwargs['threshold'] = opts.threshold
+
+    def __call__(self, args):
+        pf = args.pf
+        kwargs = {'dm_only' : args.dm_only}
+        if args.threshold is not None: kwargs['threshold'] = args.threshold
         hop_list = HaloFinder(pf, **kwargs)
-        if opts.output is None: fn = "%s.hop" % pf
-        else: fn = opts.output
+        if args.output is None: fn = "%s.hop" % pf
+        else: fn = args.output
         hop_list.write_out(fn)
 
-    @add_cmd_options(['make_profiles','make_projections','halo_parameter_file',
-                      'halos','halo_hop_style','halo_radius','halo_radius_units'])
-    def do_halos(self, subcmd, opts, arg):
+class YTHalosCmd(YTCommand):
+    name = "halos"
+    args = ('make_profiles','make_projections','halo_parameter_file',
+            'halos','halo_hop_style','halo_radius','halo_radius_units', 'pf')
+    description = \
         """
         Run HaloProfiler on one dataset
 
-        ${cmd_option_list}
         """
+    def __call__(self, args):
         import yt.analysis_modules.halo_profiler.api as HP
-        kwargs = {'halos': opts.halos,
-                  'halo_radius': opts.halo_radius,
-                  'radius_units': opts.halo_radius_units}
+        kwargs = {'halos': args.halos,
+                  'halo_radius': args.halo_radius,
+                  'radius_units': args.halo_radius_units}
 
+        hp = HP.HaloProfiler(args.pf,args.halo_parameter_file,**kwargs)
-        if opts.make_profiles:
+        hp = HP.HaloProfiler(arg,args.halo_parameter_file,**kwargs)
+        if args.make_profiles:
             hp.make_profiles()
-        if opts.make_projections:
+        if args.make_projections:
             hp.make_projections()
 
-    @add_cmd_options(["width", "unit", "bn", "proj", "center",
-                      "zlim", "axis", "field", "weight", "skip",
-                      "cmap", "output", "grids", "time"])
-    @check_args
-    def do_plot(self, subcmd, opts, arg):
+class YTPlotCmd(YTCommand):
+    args = ("width", "unit", "bn", "proj", "center",
+            "zlim", "axis", "field", "weight", "skip",
+            "cmap", "output", "grids", "time", "pf")
+    name = "plot"
+    
+    description = \
         """
         Create a set of images
 
-        ${cmd_usage}
-        ${cmd_option_list}
         """
-        pf = _fix_pf(arg)
-        center = opts.center
-        if opts.center == (-1,-1,-1):
+
+    def __call__(self, args):
+        pf = args.pf
+        center = args.center
+        if args.center == (-1,-1,-1):
             mylog.info("No center fed in; seeking.")
             v, center = pf.h.find_max("Density")
-        elif opts.center is None:
+        elif args.center is None:
             center = 0.5*(pf.domain_left_edge + pf.domain_right_edge)
         center = na.array(center)
         pc=PlotCollection(pf, center=center)
-        if opts.axis == 4:
+        if args.axis == 4:
             axes = range(3)
         else:
-            axes = [opts.axis]
+            axes = [args.axis]
         for ax in axes:
             mylog.info("Adding plot for axis %i", ax)
-            if opts.projection: pc.add_projection(opts.field, ax,
-                                    weight_field=opts.weight, center=center)
-            else: pc.add_slice(opts.field, ax, center=center)
-            if opts.grids: pc.plots[-1].modify["grids"]()
-            if opts.time: 
+            if args.projection: pc.add_projection(args.field, ax,
+                                    weight_field=args.weight, center=center)
+            else: pc.add_slice(args.field, ax, center=center)
+            if args.grids: pc.plots[-1].modify["grids"]()
+            if args.time: 
                 time = pf.current_time*pf['Time']*pf['years']
                 pc.plots[-1].modify["text"]((0.2,0.8), 't = %5.2e yr'%time)
-        pc.set_width(opts.width, opts.unit)
-        pc.set_cmap(opts.cmap)
-        if opts.zlim: pc.set_zlim(*opts.zlim)
-        if not os.path.isdir(opts.output): os.makedirs(opts.output)
-        pc.save(os.path.join(opts.output,"%s" % (pf)))
+        pc.set_width(args.width, args.unit)
+        pc.set_cmap(args.cmap)
+        if args.zlim: pc.set_zlim(*args.zlim)
+        if not os.path.isdir(args.output): os.makedirs(args.output)
+        pc.save(os.path.join(args.output,"%s" % (pf)))
 
-    @add_cmd_options(["proj", "field", "weight"])
-    @cmdln.option("-a", "--axis", action="store", type="int",
-                   dest="axis", default=0, help="Axis (4 for all three)")
-    @cmdln.option("-o", "--host", action="store", type="string",
-                   dest="host", default=None, help="IP Address to bind on")
-    @check_args
-    def do_mapserver(self, subcmd, opts, arg):
+class YTMapserverCmd(YTCommand):
+    args = ("proj", "field", "weight",
+            dict(short="-a", long="--axis", action="store", type=int,
+                 dest="axis", default=0, help="Axis (4 for all three)"),
+            dict(short ="-o", long="--host", action="store", type=str,
+                   dest="host", default=None, help="IP Address to bind on"),
+            "pf",
+            )
+    
+    name = "mapserver"
+    description = \
         """
         Serve a plot in a GMaps-style interface
 
-        ${cmd_usage}
-        ${cmd_option_list}
         """
-        pf = _fix_pf(arg)
+
+    def __call__(self, args):
+        pf = args.pf
         pc=PlotCollection(pf, center=0.5*(pf.domain_left_edge +
                                           pf.domain_right_edge))
-        if opts.axis == 4:
+        if args.axis == 4:
             print "Doesn't work with multiple axes!"
             return
-        if opts.projection:
-            p = pc.add_projection(opts.field, opts.axis, weight_field=opts.weight)
+        if args.projection:
+            p = pc.add_projection(args.field, args.axis, weight_field=args.weight)
         else:
-            p = pc.add_slice(opts.field, opts.axis)
+            p = pc.add_slice(args.field, args.axis)
         from yt.gui.reason.pannable_map import PannableMapServer
-        mapper = PannableMapServer(p.data, opts.field)
+        mapper = PannableMapServer(p.data, args.field)
         import yt.utilities.bottle as bottle
         bottle.debug(True)
-        if opts.host is not None:
-            colonpl = opts.host.find(":")
+        if args.host is not None:
+            colonpl = args.host.find(":")
             if colonpl >= 0:
-                port = int(opts.host.split(":")[-1])
-                opts.host = opts.host[:colonpl]
+                port = int(args.host.split(":")[-1])
+                args.host = args.host[:colonpl]
             else:
                 port = 8080
-            bottle.run(server='rocket', host=opts.host, port=port)
+            bottle.run(server='rocket', host=args.host, port=port)
         else:
             bottle.run(server='rocket')
 
-    def do_rpdb(self, subcmd, opts, task):
+class YTRPDBCmd(YTCommand):
+    name = "rpdb"
+    description = \
         """
         Connect to a currently running (on localhost) rpdb session.
 
         Commands run with --rpdb will trigger an rpdb session with any
         uncaught exceptions.
 
-        ${cmd_usage} 
-        ${cmd_option_list}
         """
+
+    def __call__(self, args):
         import rpdb
         rpdb.run_rpdb(int(args.task))
 
-    @add_cmd_options(['outputfn','bn','skip'])
-    @check_args
-    def do_stats(self, subcmd, opts, arg):
+class YTStatsCmd(YTCommand):
+    args = ('outputfn','bn','skip','pf')
+    name = "stats"
+    description = \
         """
         Print stats and maximum density for one or more datasets
 
-        ${cmd_option_list}
         """
-        pf = _fix_pf(arg)
+
+    def __call__(self, args):
+        pf = args.pf
         pf.h.print_stats()
         if "Density" in pf.h.field_list:
             v, c = pf.h.find_max("Density")
         print "Maximum density: %0.5e at %s" % (v, c)
-        if opts.output is not None:
+        if args.output is not None:
             t = pf.current_time * pf['years']
-            open(opts.output, "a").write(
+            open(args.output, "a").write(
                 "%s (%0.5e years): %0.5e at %s\n" % (pf, t, v, c))
 
-    @add_cmd_options([])
-    def _do_analyze(self, subcmd, opts, arg):
+class YTAnalyzeCmd(YTCommand):
+    
+    name = "analyze"
+    args = ('pf',)
+    description = \
         """
         Produce a set of analyses for a given output.  This includes
         HaloProfiler results with r200, as per the recipe file in the cookbook,
         profiles of a number of fields, projections of average Density and
         Temperature, and distribution functions for Density and Temperature.
 
-        ${cmd_option_list}
         """
+
+    def __call__(self, args):
         # We will do the following things:
         #   Halo profiling (default parameters ONLY)
         #   Projections: Density, Temperature
@@ -696,51 +740,48 @@
         ph.modify["line"](pr.field_data["Density"], pr.field_data["Temperature"])
         pc.save()
 
-    @cmdln.option("-d", "--desc", action="store",
-                  default = None, dest="desc",
-                  help="Description for this pasteboard entry")
-    def do_pasteboard(self, subcmd, opts, arg):
-        """
-        Place a file into your pasteboard.
-        """
-        if opts.desc is None: raise RuntimeError
-        from yt.utilities.pasteboard import PostInventory
-        pp = PostInventory()
-        pp.add_post(arg, desc=opts.desc)
-
-    @cmdln.option("-l", "--language", action="store",
+class YTPastebinCmd(YTCommand):
+    name = "pastebin"
+    args = (
+             dict(short="-l", long="--language", action="store",
                   default = None, dest="language",
-                  help="Use syntax highlighter for the file in language")
-    @cmdln.option("-L", "--languages", action="store_true",
+                  help="Use syntax highlighter for the file in language"),
+             dict(short="-L", long="--languages", action="store_true",
                   default = False, dest="languages",
-                  help="Retrive a list of supported languages")
-    @cmdln.option("-e", "--encoding", action="store",
+                  help="Retrieve a list of supported languages"),
+             dict(short="-e", long="--encoding", action="store",
                   default = 'utf-8', dest="encoding",
                   help="Specify the encoding of a file (default is "
-                        "utf-8 or guessing if available)")
-    @cmdln.option("-b", "--open-browser", action="store_true",
+                        "utf-8 or guessing if available)"),
+             dict(short="-b", long="--open-browser", action="store_true",
                   default = False, dest="open_browser",
-                  help="Open the paste in a web browser")
-    @cmdln.option("-p", "--private", action="store_true",
+                  help="Open the paste in a web browser"),
+             dict(short="-p", long="--private", action="store_true",
                   default = False, dest="private",
-                  help="Paste as private")
-    @cmdln.option("-c", "--clipboard", action="store_true",
+                  help="Paste as private"),
+             dict(short="-c", long="--clipboard", action="store_true",
                   default = False, dest="clipboard",
-                  help="File to output to; else, print.")
-    def do_pastebin(self, subcmd, opts, arg):
+                  help="Copy the paste URL to the clipboard."),
+             dict(short="file", type=str),
+            )
+    description = \
         """
         Post a script to an anonymous pastebin
 
         Usage: yt pastebin [options] <script>
 
-        ${cmd_option_list}
         """
+
+    def __call__(self, args):
         import yt.utilities.lodgeit as lo
-        lo.main( arg, languages=opts.languages, language=opts.language,
-                 encoding=opts.encoding, open_browser=opts.open_browser,
-                 private=opts.private, clipboard=opts.clipboard)
+        lo.main(args.file, languages=args.languages, language=args.language,
+                 encoding=args.encoding, open_browser=args.open_browser,
+                 private=args.private, clipboard=args.clipboard)
 
-    def do_pastebin_grab(self, subcmd, opts, arg):
+class YTPastebinGrabCmd(YTCommand):
+    args = (dict(short="number", type=str),)
+    name = "pastebin_grab"
+    description = \
         """
         Print an online pastebin to STDOUT for local use. Paste ID is 
         the number at the end of the url.  So to locally access pastebin:
@@ -750,29 +791,21 @@
         Ex: yt pastebin_grab 1688 > script.py
 
         """
+
+    def __call__(self, args):
         import yt.utilities.lodgeit as lo
-        lo.main( None, download=arg )
+        lo.main(None, download=args.number)
 
-    @cmdln.option("-o", "--output", action="store",
-                  default = None, dest="output_fn",
-                  help="File to output to; else, print.")
-    def do_pasteboard_grab(self, subcmd, opts, username, paste_id):
-        """
-        Download from your or another user's pasteboard
 
-        ${cmd_usage} 
-        ${cmd_option_list}
-        """
-        from yt.utilities.pasteboard import retrieve_pastefile
-        retrieve_pastefile(username, paste_id, opts.output_fn)
-
-    def do_bugreport(self, subcmd, opts):
+class YTBugreportCmd(YTCommand):
+    name = "bureport"
+    description = \
         """
         Report a bug in yt
 
-        ${cmd_usage} 
-        ${cmd_option_list}
         """
+
+    def __call__(self, args):
         print "==============================================================="
         print
         print "Hi there!  Welcome to the yt bugreport taker."
@@ -879,13 +912,13 @@
         print "Keep in touch!"
         print
 
-    def do_bootstrap_dev(self, subcmd, opts):
+class YTBootstrapDevCmd(YTCommand):
+    name = "bootstrap_dev"
+    description = \
         """
         Bootstrap a yt development environment
-
-        ${cmd_usage} 
-        ${cmd_option_list}
         """
+    def __call__(self, args):
         from mercurial import hg, ui, commands
         import imp
         import getpass
@@ -895,7 +928,7 @@
         print "Hi there!  Welcome to the yt development bootstrap tool."
         print
         print "This should get you started with mercurial as well as a few"
-        print "other handy things, like a pasteboard of your very own."
+        print "other handy things"
         print
         # We have to do a couple things.
         # First, we check that YT_DEST is set.
@@ -917,7 +950,6 @@
         print " 1. Setting up your ~/.hgrc to have a username."
         print " 2. Setting up your bitbucket user account and the hgbb"
         print "    extension."
-        print " 3. Setting up a new pasteboard repository."
         print
         firstname = lastname = email_address = bbusername = repo_list = None
         # Now we try to import the cedit extension.
@@ -1090,89 +1122,6 @@
         # We now reload the UI's config file so that it catches the [bb]
         # section changes.
         uu.readconfig(hgrc_path[0])
-        # Now the only thing remaining to do is to set up the pasteboard
-        # repository.
-        # This is, unfortunately, the most difficult.
-        print
-        print "We are now going to set up a pasteboard. This is a mechanism"
-        print "for versioned posting of snippets, collaboration and"
-        print "discussion."
-        print
-        # Let's get the full list of repositories
-        pasteboard_name = "%s.bitbucket.org" % (bbusername.lower())
-        if repo_list is None:
-            rv = hgbb._bb_apicall(uu, "users/%s" % bbusername, None, False)
-            rv = json.loads(rv)
-            repo_list = rv['repositories']
-        create = True
-        for repo in repo_list:
-            if repo['name'] == pasteboard_name:
-                create = False
-        if create:
-            # Now we first create the repository, but we
-            # will only use the creation API, not the bbcreate command.
-            print
-            print "I am now going to create the repository:"
-            print "    ", pasteboard_name
-            print "on BitBucket.org.  This will set up the domain"
-            print "     http://%s" % (pasteboard_name)
-            print "which will point to the current contents of the repo."
-            print
-            loki = raw_input("Press enter to go on, Ctrl-C to exit.")
-            data = dict(name=pasteboard_name)
-            hgbb._bb_apicall(uu, 'repositories', data)
-        # Now we clone
-        pasteboard_path = os.path.join(os.environ["YT_DEST"], "src",
-                                       pasteboard_name)
-        if os.path.isdir(pasteboard_path):
-            print "Found an existing clone of the pasteboard repo:"
-            print "    ", pasteboard_path
-        else:
-            print
-            print "I will now clone a copy of your pasteboard repo."
-            print
-            loki = raw_input("Press enter to go on, Ctrl-C to exit.")
-            commands.clone(uu, "https://%s@bitbucket.org/%s/%s" % (
-                             bbusername, bbusername, pasteboard_name),
-                           pasteboard_path)
-            pbtemplate_path = os.path.join(supp_path, "pasteboard_template")
-            pb_hgrc_path = os.path.join(pasteboard_path, ".hg", "hgrc")
-            cedit.config.setoption(uu, [pb_hgrc_path],
-                                   "paths.pasteboard = " + pbtemplate_path)
-            if create:
-                # We have to pull in the changesets from the pasteboard.
-                pb_repo = hg.repository(uu, pasteboard_path)
-                commands.pull(uu, pb_repo,
-                              os.path.join(supp_path, "pasteboard_template"))
-        if ytcfg.get("yt","pasteboard_repo") != pasteboard_path:
-            print
-            print "Now setting the pasteboard_repo option in"
-            print "~/.yt/config to point to %s" % (pasteboard_path)
-            print
-            loki = raw_input("Press enter to go on, Ctrl-C to exit.")
-            dotyt_path = os.path.expanduser("~/.yt")
-            if not os.path.isdir(dotyt_path):
-                print "There's no directory:"
-                print "    ", dotyt_path
-                print "I will now create it."
-                print
-                loki = raw_input("Press enter to go on, Ctrl-C to exit.")
-                os.mkdir(dotyt_path)
-            ytcfg_path = os.path.expanduser("~/.yt/config")
-            cedit.config.setoption(uu, [ytcfg_path],
-                        "yt.pasteboard_repo=%s" % (pasteboard_path))
-        try:
-            import pygments
-            install_pygments = False
-        except ImportError:
-            install_pygments = True
-        if install_pygments:
-            print "You are missing the Pygments package.  Installing."
-            import pip
-            rv = pip.main(["install", "pygments"])
-            if rv == 1:
-                print "Unable to install Pygments.  Please report this bug to yt-users."
-                sys.exit(1)
         try:
             import lxml
             install_lxml = False
@@ -1188,27 +1137,32 @@
         print
         print "All done!"
         print
-        print "You're now set up to use the 'yt pasteboard' command"
-        print "as well as develop using Mercurial and BitBucket."
+        print "You're now set up to develop using Mercurial and BitBucket."
         print
         print "Good luck!"
 
-    @cmdln.option("-o", "--open-browser", action="store_true",
-                  default = False, dest='open_browser',
-                  help="Open a web browser.")
-    @cmdln.option("-p", "--port", action="store",
-                  default = 0, dest='port',
-                  help="Port to listen on")
-    @cmdln.option("-f", "--find", action="store_true",
-                  default = False, dest="find",
-                  help="At startup, find all *.hierarchy files in the CWD")
-    @cmdln.option("-d", "--debug", action="store_true",
-                  default = False, dest="debug",
-                  help="Add a debugging mode for cell execution")
-    def do_serve(self, subcmd, opts):
+class YTServeCmd(YTCommand):
+    name = "serve"
+    args = (
+            dict(short="-o", long="--open-browser", action="store_true",
+                 default = False, dest='open_browser',
+                 help="Open a web browser."),
+            dict(short="-p", long="--port", action="store",
+                 default = 0, dest='port',
+                 help="Port to listen on"),
+            dict(short="-f", long="--find", action="store_true",
+                 default = False, dest="find",
+                 help="At startup, find all *.hierarchy files in the CWD"),
+            dict(short="-d", long="--debug", action="store_true",
+                 default = False, dest="debug",
+                 help="Add a debugging mode for cell execution")
+            )
+    description = \
         """
         Run the Web GUI Reason
         """
+
+    def __call__(self, args):
         # We have to do a couple things.
         # First, we check that YT_DEST is set.
         if "YT_DEST" not in os.environ:
@@ -1217,18 +1171,18 @@
             print "*** to point to the installation location!        ***"
             print
             sys.exit(1)
-        if opts.port == 0:
+        if args.port == 0:
             # This means, choose one at random.  We do this by binding to a
             # socket and allowing the OS to choose the port for that socket.
             import socket
             sock = socket.socket()
             sock.bind(('', 0))
-            opts.port = sock.getsockname()[-1]
+            args.port = sock.getsockname()[-1]
             del sock
-        elif opts.port == '-1':
+        elif args.port == '-1':
             port = raw_input("Desired yt port? ")
             try:
-                opts.port = int(port)
+                args.port = int(port)
             except ValueError:
                 print "Please try a number next time."
                 return 1
@@ -1246,78 +1200,32 @@
         from yt.gui.reason.extdirect_repl import ExtDirectREPL
         from yt.gui.reason.bottle_mods import uuid_serve_functions, PayloadHandler
         hr = ExtDirectREPL(base_extjs_path)
-        hr.debug = PayloadHandler.debug = opts.debug
-        if opts.find:
+        hr.debug = PayloadHandler.debug = args.debug
+        if args.find:
             # We just have to find them and store references to them.
             command_line = ["pfs = []"]
             for fn in sorted(glob.glob("*/*.hierarchy")):
                 command_line.append("pfs.append(load('%s'))" % fn[:-10])
             hr.execute("\n".join(command_line))
         bottle.debug()
-        uuid_serve_functions(open_browser=opts.open_browser,
-                    port=int(opts.port), repl=hr)
+        uuid_serve_functions(open_browser=args.open_browser,
+                    port=int(args.port), repl=hr)
 
     
-    def _do_remote(self, subcmd, opts):
-        import getpass, sys, socket, time, webbrowser
-        import yt.utilities.pexpect as pex
-
-        host = raw_input('Hostname: ')
-        user = raw_input('User: ')
-        password = getpass.getpass('Password: ')
-
-        sock = socket.socket()
-        sock.bind(('', 0))
-        port = sock.getsockname()[-1]
-        del sock
-
-        child = pex.spawn('ssh -L %s:localhost:%s -l %s %s'%(port, port, user, host))
-        ssh_newkey = 'Are you sure you want to continue connecting'
-        i = child.expect([pex.TIMEOUT, ssh_newkey, 'password: '])
-        if i == 0: # Timeout
-            print 'ERROR!'
-            print 'SSH could not login. Here is what SSH said:'
-            print child.before, child.after
-            return 1
-        if i == 1: # SSH does not have the public key. Just accept it.
-            child.sendline ('yes')
-            child.expect ('password: ')
-            i = child.expect([pex.TIMEOUT, 'password: '])
-            if i == 0: # Timeout
-                print 'ERROR!'
-                print 'SSH could not login. Here is what SSH said:'
-                print child.before, child.after
-                return 1
-        print "Sending password"
-        child.sendline(password)
-        del password
-        print "Okay, sending serving command"
-        child.sendline('yt serve -p -1')
-        print "Waiting ..."
-        child.expect('Desired yt port?')
-        child.sendline("%s" % port)
-        child.expect('     http://localhost:([0-9]*)/(.+)/\r')
-        print "Got:", child.match.group(1), child.match.group(2)
-        port, urlprefix = child.match.group(1), child.match.group(2)
-        print "Sleeping one second and opening browser"
-        time.sleep(1)
-        webbrowser.open("http://localhost:%s/%s/" % (port, urlprefix))
-        print "Press Ctrl-C to terminate session"
-        child.readlines()
-        while 1:
-            time.sleep(1)
-
-    @cmdln.option("-R", "--repo", action="store", type="string",
-                  dest="repo", default=".", help="Repository to upload")
-    def do_hubsubmit(self, subcmd, opts):
+class YTHubSubmitCmd(YTCommand):
+    name = "hub_submit"
+    args = (
+            dict(long="--repo", action="store", type=str,
+                 dest="repo", default=".", help="Repository to upload"),
+           )
+    description = \
         """
         Submit a mercurial repository to the yt Hub
         (http://hub.yt-project.org/), creating a BitBucket repo in the process
         if necessary.
+        """
 
-        ${cmd_usage}
-        ${cmd_option_list}
-        """
+    def __call__(self, args):
         import imp
         from mercurial import hg, ui, commands, error, config
         uri = "http://hub.yt-project.org/3rdparty/API/api.php"
@@ -1340,10 +1248,10 @@
             sys.exit(1)
         hgbb = imp.load_module("hgbb", *result)
         try:
-            repo = hg.repository(uu, opts.repo)
+            repo = hg.repository(uu, args.repo)
             conf = config.config()
-            if os.path.exists(os.path.join(opts.repo,".hg","hgrc")):
-                conf.read(os.path.join(opts.repo, ".hg", "hgrc"))
+            if os.path.exists(os.path.join(args.repo,".hg","hgrc")):
+                conf.read(os.path.join(args.repo, ".hg", "hgrc"))
             needs_bb = True
             if "paths" in conf.sections():
                 default = conf['paths'].get("default", "")
@@ -1358,7 +1266,7 @@
                             break
         except error.RepoError:
             print "Unable to find repo at:"
-            print "   %s" % (os.path.abspath(opts.repo))
+            print "   %s" % (os.path.abspath(args.repo))
             print
             print "Would you like to initialize one?  If this message"
             print "surprises you, you should perhaps press Ctrl-C to quit."
@@ -1369,8 +1277,8 @@
                 print "Okay, rad -- we'll let you handle it and get back to",
                 print " us."
                 return 1
-            commands.init(uu, dest=opts.repo)
-            repo = hg.repository(uu, opts.repo)
+            commands.init(uu, dest=args.repo)
+            repo = hg.repository(uu, args.repo)
             commands.add(uu, repo)
             commands.commit(uu, repo, message="Initial automated import by yt")
             needs_bb = True
@@ -1395,7 +1303,7 @@
                 print
                 print "to get set up and ready to go."
                 return 1
-            bb_repo_name = os.path.basename(os.path.abspath(opts.repo))
+            bb_repo_name = os.path.basename(os.path.abspath(args.repo))
             print
             print "I am now going to create the repository:"
             print "    ", bb_repo_name
@@ -1478,13 +1386,16 @@
         rv = urllib2.urlopen(req).read()
         print rv
 
-    def do_upload_image(self, subcmd, opts, filename):
+class YTUploadImageCmd(YTCommand):
+    args = (dict(short="file", type=str),)
+    description = \
         """
         Upload an image to imgur.com.  Must be PNG.
 
-        ${cmd_usage} 
-        ${cmd_option_list}
         """
+    name = "upload_image"
+    def __call__(self, args):
+        filename = args.file
         if not filename.endswith(".png"):
             print "File must be a PNG file!"
             return 1
@@ -1516,56 +1427,57 @@
             print
             pprint.pprint(rv)
 
-    @add_cmd_options(["width", "unit", "center","enhance",'outputfn',
-                      "field", "cmap", "contours", "viewpoint",
-                      "pixels","up","valrange","log","contour_width"])
-    @check_args
-    def do_render(self, subcmd, opts, arg):
+class YTRenderCmd(YTCommand):
+        
+    args = ("width", "unit", "center","enhance",'outputfn',
+            "field", "cmap", "contours", "viewpoint",
+            "pixels","up","valrange","log","contour_width", "pf")
+    name = "render"
+    description = \
         """
         Create a simple volume rendering
+        """
 
-        ${cmd_usage}
-        ${cmd_option_list}
-        """
-        pf = _fix_pf(arg)
-        center = opts.center
-        if opts.center == (-1,-1,-1):
+    def __call__(self, args):
+        pf = args.pf
+        center = args.center
+        if args.center == (-1,-1,-1):
             mylog.info("No center fed in; seeking.")
             v, center = pf.h.find_max("Density")
-        elif opts.center is None:
+        elif args.center is None:
             center = 0.5*(pf.domain_left_edge + pf.domain_right_edge)
         center = na.array(center)
 
-        L = opts.viewpoint
+        L = args.viewpoint
         if L is None:
             L = [1.]*3
-        L = na.array(opts.viewpoint)
+        L = na.array(args.viewpoint)
 
-        unit = opts.unit
+        unit = args.unit
         if unit is None:
             unit = '1'
-        width = opts.width
+        width = args.width
         if width is None:
             width = 0.5*(pf.domain_right_edge - pf.domain_left_edge)
         width /= pf[unit]
 
-        N = opts.pixels
+        N = args.pixels
         if N is None:
             N = 512 
         
-        up = opts.up
+        up = args.up
         if up is None:
             up = [0.,0.,1.]
             
-        field = opts.field
+        field = args.field
         if field is None:
             field = 'Density'
         
-        log = opts.takelog
+        log = args.takelog
         if log is None:
             log = True
 
-        myrange = opts.valrange
+        myrange = args.valrange
         if myrange is None:
             roi = pf.h.region(center, center-width, center+width)
             mi, ma = roi.quantities['Extrema'](field)[0]
@@ -1574,13 +1486,13 @@
         else:
             mi, ma = myrange[0], myrange[1]
 
-        n_contours = opts.contours
+        n_contours = args.contours
         if n_contours is None:
             n_contours = 7
 
-        contour_width = opts.contour_width
+        contour_width = args.contour_width
 
-        cmap = opts.cmap
+        cmap = args.cmap
         if cmap is None:
             cmap = 'jet'
         tf = ColorTransferFunction((mi-2, ma+2))
@@ -1589,12 +1501,12 @@
         cam = pf.h.camera(center, L, width, (N,N), transfer_function=tf)
         image = cam.snapshot()
 
-        if opts.enhance:
+        if args.enhance:
             for i in range(3):
                 image[:,:,i] = image[:,:,i]/(image[:,:,i].mean() + 5.*image[:,:,i].std())
             image[image>1.0]=1.0
             
-        save_name = opts.output
+        save_name = args.output
         if save_name is None:
             save_name = "%s"%pf+"_"+field+"_rendering.png"
         if not '.png' in save_name:
@@ -1604,9 +1516,7 @@
         
 
 def run_main():
-    for co in ["--parallel", "--paste"]:
-        if co in sys.argv: del sys.argv[sys.argv.index(co)]
-    YT = YTCommands()
-    sys.exit(YT.main())
+    args = parser.parse_args()
+    args.func(args)
 
 if __name__ == "__main__": run_main()
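
The hunks above replace the cmdln-based "do_*" methods with YTCommand subclasses: each
command declares a name, a description, and an args tuple (plain option strings or dicts
describing an argparse argument), and run_main() dispatches with args.func(args).  In the
yt code above, the string entries in args presumably look up shared option definitions by
name, so common options like "pf" or "width" can be reused across commands.  A minimal,
self-contained sketch of the same shape, written against plain argparse with illustrative
names only (Command, StatsCommand, and the option specs below are not yt's):

import argparse

parser = argparse.ArgumentParser(prog="demo")
subparsers = parser.add_subparsers(dest="command")

class Command(object):
    # Each subcommand carries its own name, help text and argument specs.
    name = None
    description = ""
    args = ()   # sequence of (flag_list, keyword_dict) pairs for add_argument

    @classmethod
    def register(cls):
        sub = subparsers.add_parser(cls.name, help=cls.description)
        for flags, opts in cls.args:
            sub.add_argument(*flags, **opts)
        # Store a callable instance so dispatch is simply args.func(args).
        sub.set_defaults(func=cls())

class StatsCommand(Command):
    name = "stats"
    description = "Print a short summary for a dataset"
    args = ((["filename"], dict(type=str)),
            (["-o", "--output"], dict(dest="output", default=None)))

    def __call__(self, args):
        print("stats for %s (output file: %s)" % (args.filename, args.output))

StatsCommand.register()

if __name__ == "__main__":
    parsed = parser.parse_args(["stats", "data0001", "-o", "stats.txt"])
    parsed.func(parsed)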


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -515,3 +515,107 @@
     vec2 /= norm2
     vec3 = na.cross(vec1, vec2)
     return vec1, vec2, vec3
+
+def quartiles(a, axis=None, out=None, overwrite_input=False):
+    """
+    Compute the quartile values (25% and 75%) along the specified axis
+    in the same way that the numpy.median calculates the median (50%) value
+    alone a specified axis.  Check numpy.median for details, as it is
+    virtually the same algorithm.
+
+    Returns an array of the quartiles of the array elements [lower quartile, 
+    upper quartile].
+
+    Parameters
+    ----------
+    a : array_like
+        Input array or object that can be converted to an array.
+    axis : {None, int}, optional
+        Axis along which the quartiles are computed. The default (axis=None)
+        is to compute the quartiles along a flattened version of the array.
+    out : ndarray, optional
+        Alternative output array in which to place the result. It must
+        have the same shape and buffer length as the expected output,
+        but the type (of the output) will be cast if necessary.
+    overwrite_input : {False, True}, optional
+       If True, then allow use of memory of input array (a) for
+       calculations. The input array will be modified by the call to
+       quartiles. This will save memory when you do not need to preserve
+       the contents of the input array. Treat the input as undefined,
+       but it will probably be fully or partially sorted. Default is
+       False. Note that, if `overwrite_input` is True and the input
+       is not already an ndarray, an error will be raised.
+
+    Returns
+    -------
+    quartiles : ndarray
+        A new 2D array holding the result (unless `out` is specified, in
+        which case that array is returned instead).  If the input contains
+        integers, or floats of smaller precision than 64, then the output
+        data-type is float64.  Otherwise, the output data-type is the same
+        as that of the input.
+
+    See Also
+    --------
+    numpy.median, numpy.mean, numpy.percentile
+
+    Notes
+    -----
+    Given a vector V of length N, the quartiles of V are the 25% and 75% values 
+    of a sorted copy of V, ``V_sorted`` - i.e., ``V_sorted[(N-1)/4]`` and 
+    ``3*V_sorted[(N-1)/4]``, when N is odd.  When N is even, it is the average 
+    of the two values bounding these values of ``V_sorted``.
+
+    Examples
+    --------
+    >>> a = na.arange(100).reshape(10,10)
+    >>> a
+    array([[ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9],
+           [10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
+           [20, 21, 22, 23, 24, 25, 26, 27, 28, 29],
+           [30, 31, 32, 33, 34, 35, 36, 37, 38, 39],
+           [40, 41, 42, 43, 44, 45, 46, 47, 48, 49],
+           [50, 51, 52, 53, 54, 55, 56, 57, 58, 59],
+           [60, 61, 62, 63, 64, 65, 66, 67, 68, 69],
+           [70, 71, 72, 73, 74, 75, 76, 77, 78, 79],
+           [80, 81, 82, 83, 84, 85, 86, 87, 88, 89],
+           [90, 91, 92, 93, 94, 95, 96, 97, 98, 99]])
+    >>> mu.quartiles(a)
+    array([ 24.5,  74.5])
+    >>> mu.quartiles(a,axis=0)
+    array([[ 15.,  16.,  17.,  18.,  19.,  20.,  21.,  22.,  23.,  24.],
+           [ 65.,  66.,  67.,  68.,  69.,  70.,  71.,  72.,  73.,  74.]])
+    >>> mu.quartiles(a,axis=1)
+    array([[  1.5,  11.5,  21.5,  31.5,  41.5,  51.5,  61.5,  71.5,  81.5,
+             91.5],
+           [  6.5,  16.5,  26.5,  36.5,  46.5,  56.5,  66.5,  76.5,  86.5,
+             96.5]])
+    """
+    if overwrite_input:
+        if axis is None:
+            sorted = a.ravel()
+            sorted.sort()
+        else:
+            a.sort(axis=axis)
+            sorted = a
+    else:
+        sorted = na.sort(a, axis=axis)
+    if axis is None:
+        axis = 0
+    indexer = [slice(None)] * sorted.ndim
+    indices = [int(sorted.shape[axis]/4), int(sorted.shape[axis]*.75)]
+    result = []
+    for index in indices:
+        if sorted.shape[axis] % 2 == 1:
+            # index with slice to allow mean (below) to work
+            indexer[axis] = slice(index, index+1)
+        else:
+            indexer[axis] = slice(index-1, index+1)
+        # special cases for small arrays
+        if sorted.shape[axis] == 2:
+            # index with slice to allow mean (below) to work
+            indexer[axis] = slice(index, index+1)
+        # Use mean in odd and even case to coerce data type
+        # and check, use out array.
+        result.append(na.mean(sorted[indexer], axis=axis, out=out))
+    return na.array(result)
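
The quartiles() helper above follows numpy.median's convention of averaging the two order
statistics that straddle each cut point, rather than numpy.percentile's interpolation, so
the two give slightly different numbers on the docstring example.  A quick standalone
illustration (plain numpy, nothing yt-specific):

import numpy as np

a = np.arange(100).reshape(10, 10)

# numpy.percentile interpolates between neighbouring order statistics:
print(np.percentile(a, [25, 75]))       # [ 24.75  74.25]

# quartiles() instead averages the two values straddling each cut, exactly as
# numpy.median does for an even-length axis:
flat = np.sort(a, axis=None)
print(0.5 * (flat[24] + flat[25]))      # 24.5, matching quartiles(a)[0]
print(0.5 * (flat[74] + flat[75]))      # 74.5, matching quartiles(a)[1]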


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/utilities/minimal_representation.py
--- /dev/null
+++ b/yt/utilities/minimal_representation.py
@@ -0,0 +1,106 @@
+"""
+Skeleton objects that represent a few fundamental yt data types.
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import abc
+
+class ContainerClass(object):
+    pass
+
+class MinimalRepresentation(object):
+    __metaclass__ = abc.ABCMeta
+
+    def _update_attrs(self, obj, attr_list):
+        for attr in attr_list:
+            setattr(self, attr, getattr(obj, attr, None))
+        if hasattr(obj, "pf"):
+            self.output_hash = obj.pf._hash()
+
+    def __init__(self, obj):
+        self._update_attrs(obj, self._attr_list)
+
+    @abc.abstractmethod
+    def _generate_post(self):
+        pass
+
+    @abc.abstractproperty
+    def _attr_list(self):
+        pass
+
+    def _return_filtered_object(self, attrs):
+        new_attrs = tuple(attr for attr in self._attr_list
+                          if attr not in attrs)
+        new_class = type('Filtered%s' % self.__class__.__name__,
+                         (FilteredRepresentation,),
+                         {'_attr_list': new_attrs})
+        return new_class(self)
+
+    @property
+    def _attrs(self):
+        return dict( ((attr, getattr(self, attr)) for attr in self._attr_list) )
+
+    @classmethod
+    def _from_metadata(cls, metadata):
+        cc = ContainerClass()
+        for a, v in metadata.items():
+            setattr(cc, a, v)
+        return cls(cc)
+
+class FilteredRepresentation(MinimalRepresentation):
+    def _generate_post(self):
+        raise RuntimeError
+
+class MinimalStaticOutput(MinimalRepresentation):
+    _attr_list = ("dimensionality", "refine_by", "domain_dimensions",
+                  "current_time", "domain_left_edge", "domain_right_edge",
+                  "unique_identifier", "current_redshift", "output_hash",
+                  "cosmological_simulation", "omega_matter", "omega_lambda",
+                  "hubble_constant", "name")
+
+    def __init__(self, obj):
+        super(MinimalStaticOutput, self).__init__(obj)
+        self.output_hash = obj._hash()
+        self.name = str(obj)
+
+    def _generate_post(self):
+        metadata = self._attrs
+        chunks = []
+        return metadata, chunks
+
+class MinimalMappableData(MinimalRepresentation):
+
+    weight = "None"
+    _attr_list = ("field_data", "field", "weight", "axis", "output_hash")
+
+    def _generate_post(self):
+        nobj = self._return_filtered_object(("field_data",))
+        metadata = nobj._attrs
+        chunks = [(arr, self.field_data[arr]) for arr in self.field_data]
+        return (metadata, chunks)
+
+class MinimalProjectionData(MinimalMappableData):
+
+    def __init__(self, obj):
+        super(MinimalProjectionData, self).__init__(obj)
+        self.type = "proj"


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -39,49 +39,23 @@
 from yt.utilities.amr_utils import \
     QuadTree, merge_quadtrees
 
-exe_name = os.path.basename(sys.executable)
-# At import time, we determined whether or not we're being run in parallel.
-if exe_name in \
-        ["mpi4py", "embed_enzo",
-         "python"+sys.version[:3]+"-mpi"] \
-    or "--parallel" in sys.argv or '_parallel' in dir(sys) \
-    or any(["ipengine" in arg for arg in sys.argv]):
+parallel_capable = ytcfg.getboolean("yt", "__parallel")
+
+# Set up translation table and import things
+if parallel_capable:
     from mpi4py import MPI
-    parallel_capable = (MPI.COMM_WORLD.size > 1)
-    if parallel_capable:
-        mylog.info("Global parallel computation enabled: %s / %s",
-                   MPI.COMM_WORLD.rank, MPI.COMM_WORLD.size)
-        ytcfg["yt","__global_parallel_rank"] = str(MPI.COMM_WORLD.rank)
-        ytcfg["yt","__global_parallel_size"] = str(MPI.COMM_WORLD.size)
-        ytcfg["yt","__parallel"] = "True"
-        if exe_name == "embed_enzo" or \
-            ("_parallel" in dir(sys) and sys._parallel == True):
-            ytcfg["yt","inline"] = "True"
-        # I believe we do not need to turn this off manually
-        #ytcfg["yt","StoreParameterFiles"] = "False"
-        # Now let's make sure we have the right options set.
-        if MPI.COMM_WORLD.rank > 0:
-            if ytcfg.getboolean("yt","LogFile"):
-                ytcfg["yt","LogFile"] = "False"
-                yt.utilities.logger.disable_file_logging()
-        yt.utilities.logger.uncolorize_logging()
-        # Even though the uncolorize function already resets the format string,
-        # we reset it again so that it includes the processor.
-        f = logging.Formatter("P%03i %s" % (MPI.COMM_WORLD.rank,
-                                            yt.utilities.logger.ufstring))
-        if len(yt.utilities.logger.rootLogger.handlers) > 0:
-            yt.utilities.logger.rootLogger.handlers[0].setFormatter(f)
-        if ytcfg.getboolean("yt", "parallel_traceback"):
-            sys.excepthook = traceback_writer_hook("_%03i" % MPI.COMM_WORLD.rank)
+    yt.utilities.logger.uncolorize_logging()
+    # Even though the uncolorize function already resets the format string,
+    # we reset it again so that it includes the processor.
+    f = logging.Formatter("P%03i %s" % (MPI.COMM_WORLD.rank,
+                                        yt.utilities.logger.ufstring))
+    if len(yt.utilities.logger.rootLogger.handlers) > 0:
+        yt.utilities.logger.rootLogger.handlers[0].setFormatter(f)
+    if ytcfg.getboolean("yt", "parallel_traceback"):
+        sys.excepthook = traceback_writer_hook("_%03i" % MPI.COMM_WORLD.rank)
     if ytcfg.getint("yt","LogLevel") < 20:
         yt.utilities.logger.ytLogger.warning(
           "Log Level is set low -- this could affect parallel performance!")
-
-else:
-    parallel_capable = False
-
-# Set up translation table
-if parallel_capable:
     dtype_names = dict(
             float32 = MPI.FLOAT,
             float64 = MPI.DOUBLE,
@@ -374,7 +348,8 @@
             to_share[rstore.result_id] = rstore.result
         else:
             yield obj
-    communication_system.communicators.pop()
+    if parallel_capable:
+        communication_system.communicators.pop()
     if storage is not None:
         # Now we have to broadcast it
         new_storage = my_communicator.par_combine_object(
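
The hunk above drops the old import-time guessing (executable name, "--parallel" in
sys.argv) and instead trusts a single configuration flag, ytcfg's "__parallel" boolean,
to decide whether the MPI-specific wiring runs.  Roughly the same gate, sketched with the
standard-library ConfigParser standing in for ytcfg (the section and option names here
are only illustrative):

try:
    from configparser import ConfigParser       # Python 3
except ImportError:
    from ConfigParser import ConfigParser       # Python 2

cfg = ConfigParser()
cfg.add_section("yt")
cfg.set("yt", "__parallel", "True")              # set earlier in startup, not here

parallel_capable = cfg.getboolean("yt", "__parallel")

if parallel_capable:
    # In the real module this is where mpi4py is imported, the logger is
    # reformatted to carry the MPI rank, and the dtype translation table is built.
    print("parallel wiring goes here")
else:
    print("serial run; no MPI setup")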


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/utilities/pasteboard.py
--- a/yt/utilities/pasteboard.py
+++ /dev/null
@@ -1,166 +0,0 @@
-"""
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: Columbia University
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2011 Matthew Turk.  All Rights Reserved.
-
-  This file is part of yt.
-
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
-
-from mercurial import ui, repo, commands, hg
-import json
-import os
-import time
-import uuid
-import urllib
-
-from yt.config import ytcfg
-
-def _get_last_mod(filectx):
-    rev = filectx.filectx(filectx.filerev())
-    return rev
-
-class PostInventory(object):
-    def __init__(self, uu = None, repo_fn = None):
-        if uu is None: uu = ui.ui()
-        if repo_fn is None: repo_fn = ytcfg.get("yt","pasteboard_repo")
-        if repo_fn == '':
-            raise KeyError("~/.yt/config:[yt]pasteboard_repo")
-        self.repo_fn = repo_fn
-        self.bbrepo = hg.repository(uu, repo_fn)
-        config_fn = os.path.join(repo_fn, ".hg", "hgrc")
-        uu.readconfig(config_fn)
-        commands.pull(uu, self.bbrepo)
-        commands.update(uu, self.bbrepo, clean=True)
-        if not os.path.exists(os.path.join(repo_fn, "posts")):
-            os.makedirs(os.path.join(repo_fn, "posts"))
-        if not os.path.exists(os.path.join(repo_fn, "html")):
-            os.makedirs(os.path.join(repo_fn, "html"))
-        self.uu = uu
-
-    def regenerate_posts(self):
-        self.posts = []
-        for file in self.bbrepo["tip"]:
-            if file.startswith("posts/") and file.count("/") == 1 \
-               and not file.endswith(".desc"):
-                filectx = self.bbrepo["tip"][file]
-                last_mod = _get_last_mod(filectx).date()
-                self.posts.append((last_mod[0] + last_mod[1], file))
-        self.posts.sort()
-        self.posts = self.posts[::-1]
-
-    def add_post(self, filename, desc = None,
-                 uu = None, highlight = True, push = True):
-        # We assume the post filename exists in the current space
-        self.regenerate_posts()
-        if uu is None: uu = self.uu
-        prefix = uuid.uuid4()
-        name = "%s-%s" % (prefix, os.path.basename(filename))
-        name_noext = name.replace(".","-")
-        hfn = "html/%s.html" % (name_noext)
-        pfn = "posts/%s" % (name)
-        abs_pfn = os.path.join(self.repo_fn, pfn)
-        abs_hfn = os.path.join(self.repo_fn, hfn)
-        if desc is not None:
-            open(abs_pfn + ".desc", "w").write(desc)
-        self.posts.insert(0, (int(time.time()), "posts/%s" % name))
-        if not os.path.exists(abs_pfn):
-            open(abs_pfn,"w").write(open(filename).read())
-        inv_fname = self.update_inventory()
-        if highlight and not name.endswith(".html"):
-            from pygments.cmdline import main as pygmain
-            rv = pygmain(["pygmentize", "-o", abs_hfn,
-                          "-O", "full", abs_pfn])
-        if not highlight or rv:
-            content = open(abs_pfn).read()
-            open(abs_hfn, "w").write(
-                "<HTML><BODY><PRE>" + content + "</PRE></BODY></HTML>")
-        to_manage = [abs_pfn, abs_hfn]
-        if desc is not None: to_manage.append(abs_pfn + ".desc")
-        commands.add(uu, self.bbrepo, *to_manage)
-        commands.commit(uu, self.bbrepo, *(to_manage + [inv_fname]),
-                        message="Adding %s" % name)
-        if push: commands.push(uu, self.bbrepo)
-
-    def update_inventory(self):
-        tip = self.bbrepo["tip"]
-        vals = []
-        for t, pfn in self.posts:
-            dfn = pfn + ".desc"
-            if dfn in tip:
-                d = tip[dfn].data()
-                last_mod =_get_last_mod(tip[dfn])
-                last_hash = last_mod.hex()
-                uname = last_mod.user()
-            elif pfn not in tip:
-                abs_pfn = os.path.join(self.repo_fn, pfn)
-                uname = self.uu.config("ui","username")
-                if os.path.exists(abs_pfn + ".desc"):
-                    d = open(abs_pfn + ".desc").read()
-                else:
-                    d = open(abs_pfn).read()
-                last_hash = "tip"
-            else:
-                d = tip[pfn].data()
-                last_mod = _get_last_mod(tip[pfn])
-                last_hash = last_mod.hex()
-                uname = last_mod.user()
-            if len(d) > 80: d = d[:77] + "..."
-            name_noext = pfn[6:].replace(".","-")
-            vals.append(dict(modified = time.ctime(t),
-                             modtime = t,
-                             lastmod_hash = last_hash,
-                             fullname = pfn,
-                             htmlname = "html/%s.html" % name_noext,
-                             name = pfn[43:], # 6 for posts/ then 36 for UUID
-                             username = uname,
-                             descr = d)) 
-        fn = os.path.join(self.repo_fn, "inventory.json")
-        f = open(fn, "w")
-        f.write("var inventory_data = ")
-        json.dump(vals, f, indent = 1)
-        f.write(";")
-        return fn
-
-def retrieve_pastefile(username, paste_id, output_fn = None):
-    # First we get the username's inventory.json
-    s = urllib.urlopen("http://%s.bitbucket.org/inventory.json" % (username))
-    data = s.read()
-    # This is an ugly, ugly hack for my lack of understanding of how best to
-    # handle this JSON stuff.
-    data = data[data.find("=")+1:data.rfind(";")] 
-    #import pdb;pdb.set_trace()
-    inv = json.loads(data)
-    k = None
-    if len(paste_id) == 36:
-        # Then this is a UUID
-        for k in inv:
-            if k['fullname'][6:42] == paste_id: break
-    elif len(paste_id) == 10:
-        pp = int(paste_id)
-        for k in inv:
-            if k['modtime'] == pp: break
-    if k is None: raise KeyError(k)
-    # k is our key
-    url = "http://%s.bitbucket.org/%s" % (username, k['fullname'])
-    s = urllib.urlopen(url)
-    data = s.read()
-    if output_fn is not None:
-        if os.path.exists(output_fn): raise IOError(output_fn)
-        open(output_fn, "w").write(data)
-    else:
-        print data


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/utilities/pexpect.py
--- a/yt/utilities/pexpect.py
+++ /dev/null
@@ -1,1845 +0,0 @@
-"""Pexpect is a Python module for spawning child applications and controlling
-them automatically. Pexpect can be used for automating interactive applications
-such as ssh, ftp, passwd, telnet, etc. It can be used to a automate setup
-scripts for duplicating software package installations on different servers. It
-can be used for automated software testing. Pexpect is in the spirit of Don
-Libes' Expect, but Pexpect is pure Python. Other Expect-like modules for Python
-require TCL and Expect or require C extensions to be compiled. Pexpect does not
-use C, Expect, or TCL extensions. It should work on any platform that supports
-the standard Python pty module. The Pexpect interface focuses on ease of use so
-that simple tasks are easy.
-
-There are two main interfaces to Pexpect -- the function, run() and the class,
-spawn. You can call the run() function to execute a command and return the
-output. This is a handy replacement for os.system().
-
-For example::
-
-    pexpect.run('ls -la')
-
-The more powerful interface is the spawn class. You can use this to spawn an
-external child command and then interact with the child by sending lines and
-expecting responses.
-
-For example::
-
-    child = pexpect.spawn('scp foo myname at host.example.com:.')
-    child.expect ('Password:')
-    child.sendline (mypassword)
-
-This works even for commands that ask for passwords or other input outside of
-the normal stdio streams.
-
-Credits: Noah Spurrier, Richard Holden, Marco Molteni, Kimberley Burchett,
-Robert Stone, Hartmut Goebel, Chad Schroeder, Erick Tryzelaar, Dave Kirby, Ids
-vander Molen, George Todd, Noel Taylor, Nicolas D. Cesar, Alexander Gattin,
-Geoffrey Marshall, Francisco Lourenco, Glen Mabey, Karthik Gurusamy, Fernando
-Perez, Corey Minyard, Jon Cohen, Guillaume Chazarain, Andrew Ryan, Nick
-Craig-Wood, Andrew Stone, Jorgen Grahn (Let me know if I forgot anyone.)
-
-Free, open source, and all that good stuff.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
-so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-Pexpect Copyright (c) 2008 Noah Spurrier
-http://pexpect.sourceforge.net/
-
-$Id: pexpect.py 507 2007-12-27 02:40:52Z noah $
-"""
-
-try:
-    import os, sys, time
-    import select
-    import string
-    import re
-    import struct
-    import resource
-    import types
-    import pty
-    import tty
-    import termios
-    import fcntl
-    import errno
-    import traceback
-    import signal
-except ImportError, e:
-    raise ImportError (str(e) + """
-
-A critical module was not found. Probably this operating system does not
-support it. Pexpect is intended for UNIX-like operating systems.""")
-
-__version__ = '2.3'
-__revision__ = '$Revision: 399 $'
-__all__ = ['ExceptionPexpect', 'EOF', 'TIMEOUT', 'spawn', 'run', 'which',
-    'split_command_line', '__version__', '__revision__']
-
-# Exception classes used by this module.
-class ExceptionPexpect(Exception):
-
-    """Base class for all exceptions raised by this module.
-    """
-
-    def __init__(self, value):
-
-        self.value = value
-
-    def __str__(self):
-
-        return str(self.value)
-
-    def get_trace(self):
-
-        """This returns an abbreviated stack trace with lines that only concern
-        the caller. In other words, the stack trace inside the Pexpect module
-        is not included. """
-
-        tblist = traceback.extract_tb(sys.exc_info()[2])
-        #tblist = filter(self.__filter_not_pexpect, tblist)
-        tblist = [item for item in tblist if self.__filter_not_pexpect(item)]
-        tblist = traceback.format_list(tblist)
-        return ''.join(tblist)
-
-    def __filter_not_pexpect(self, trace_list_item):
-
-        """This returns True if list item 0 the string 'pexpect.py' in it. """
-
-        if trace_list_item[0].find('pexpect.py') == -1:
-            return True
-        else:
-            return False
-
-class EOF(ExceptionPexpect):
-
-    """Raised when EOF is read from a child. This usually means the child has exited."""
-
-class TIMEOUT(ExceptionPexpect):
-
-    """Raised when a read time exceeds the timeout. """
-
-##class TIMEOUT_PATTERN(TIMEOUT):
-##    """Raised when the pattern match time exceeds the timeout.
-##    This is different than a read TIMEOUT because the child process may
-##    give output, thus never give a TIMEOUT, but the output
-##    may never match a pattern.
-##    """
-##class MAXBUFFER(ExceptionPexpect):
-##    """Raised when a scan buffer fills before matching an expected pattern."""
-
-def run (command, timeout=-1, withexitstatus=False, events=None, extra_args=None, logfile=None, cwd=None, env=None):
-
-    """
-    This function runs the given command; waits for it to finish; then
-    returns all output as a string. STDERR is included in output. If the full
-    path to the command is not given then the path is searched.
-
-    Note that lines are terminated by CR/LF (\\r\\n) combination even on
-    UNIX-like systems because this is the standard for pseudo ttys. If you set
-    'withexitstatus' to true, then run will return a tuple of (command_output,
-    exitstatus). If 'withexitstatus' is false then this returns just
-    command_output.
-
-    The run() function can often be used instead of creating a spawn instance.
-    For example, the following code uses spawn::
-
-        from pexpect import *
-        child = spawn('scp foo myname at host.example.com:.')
-        child.expect ('(?i)password')
-        child.sendline (mypassword)
-
-    The previous code can be replace with the following::
-
-        from pexpect import *
-        run ('scp foo myname at host.example.com:.', events={'(?i)password': mypassword})
-
-    Examples
-    ========
-
-    Start the apache daemon on the local machine::
-
-        from pexpect import *
-        run ("/usr/local/apache/bin/apachectl start")
-
-    Check in a file using SVN::
-
-        from pexpect import *
-        run ("svn ci -m 'automatic commit' my_file.py")
-
-    Run a command and capture exit status::
-
-        from pexpect import *
-        (command_output, exitstatus) = run ('ls -l /bin', withexitstatus=1)
-
-    Tricky Examples
-    ===============
-
-    The following will run SSH and execute 'ls -l' on the remote machine. The
-    password 'secret' will be sent if the '(?i)password' pattern is ever seen::
-
-        run ("ssh username at machine.example.com 'ls -l'", events={'(?i)password':'secret\\n'})
-
-    This will start mencoder to rip a video from DVD. This will also display
-    progress ticks every 5 seconds as it runs. For example::
-
-        from pexpect import *
-        def print_ticks(d):
-            print d['event_count'],
-        run ("mencoder dvd://1 -o video.avi -oac copy -ovc copy", events={TIMEOUT:print_ticks}, timeout=5)
-
-    The 'events' argument should be a dictionary of patterns and responses.
-    Whenever one of the patterns is seen in the command out run() will send the
-    associated response string. Note that you should put newlines in your
-    string if Enter is necessary. The responses may also contain callback
-    functions. Any callback is function that takes a dictionary as an argument.
-    The dictionary contains all the locals from the run() function, so you can
-    access the child spawn object or any other variable defined in run()
-    (event_count, child, and extra_args are the most useful). A callback may
-    return True to stop the current run process otherwise run() continues until
-    the next event. A callback may also return a string which will be sent to
-    the child. 'extra_args' is not used by directly run(). It provides a way to
-    pass data to a callback function through run() through the locals
-    dictionary passed to a callback. """
-
-    if timeout == -1:
-        child = spawn(command, maxread=2000, logfile=logfile, cwd=cwd, env=env)
-    else:
-        child = spawn(command, timeout=timeout, maxread=2000, logfile=logfile, cwd=cwd, env=env)
-    if events is not None:
-        patterns = events.keys()
-        responses = events.values()
-    else:
-        patterns=None # We assume that EOF or TIMEOUT will save us.
-        responses=None
-    child_result_list = []
-    event_count = 0
-    while 1:
-        try:
-            index = child.expect (patterns)
-            if type(child.after) in types.StringTypes:
-                child_result_list.append(child.before + child.after)
-            else: # child.after may have been a TIMEOUT or EOF, so don't cat those.
-                child_result_list.append(child.before)
-            if type(responses[index]) in types.StringTypes:
-                child.send(responses[index])
-            elif type(responses[index]) is types.FunctionType:
-                callback_result = responses[index](locals())
-                sys.stdout.flush()
-                if type(callback_result) in types.StringTypes:
-                    child.send(callback_result)
-                elif callback_result:
-                    break
-            else:
-                raise TypeError ('The callback must be a string or function type.')
-            event_count = event_count + 1
-        except TIMEOUT, e:
-            child_result_list.append(child.before)
-            break
-        except EOF, e:
-            child_result_list.append(child.before)
-            break
-    child_result = ''.join(child_result_list)
-    if withexitstatus:
-        child.close()
-        return (child_result, child.exitstatus)
-    else:
-        return child_result
-
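-# Editor's note -- an illustrative sketch (not part of the original module) of
-# the callback protocol documented in run() above.  The callback name and the
-# command are arbitrary, and 'extra_args' is assumed to be a run() keyword, as
-# the docstring implies.
-#
-#     def report_tick(d):
-#         # 'd' is the locals() dictionary of run(); 'event_count' and
-#         # 'extra_args' are described in the docstring above.
-#         print d['extra_args'], d['event_count']
-#
-#     run('ping -c 5 localhost', events={TIMEOUT: report_tick},
-#         timeout=1, extra_args='tick')
-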
-class spawn (object):
-
-    """This is the main class interface for Pexpect. Use this class to start
-    and control child applications. """
-
-    def __init__(self, command, args=[], timeout=30, maxread=2000, searchwindowsize=None, logfile=None, cwd=None, env=None):
-
-        """This is the constructor. The command parameter may be a string that
-        includes a command and any arguments to the command. For example::
-
-            child = pexpect.spawn ('/usr/bin/ftp')
-            child = pexpect.spawn ('/usr/bin/ssh user at example.com')
-            child = pexpect.spawn ('ls -latr /tmp')
-
-        You may also construct it with a list of arguments like so::
-
-            child = pexpect.spawn ('/usr/bin/ftp', [])
-            child = pexpect.spawn ('/usr/bin/ssh', ['user at example.com'])
-            child = pexpect.spawn ('ls', ['-latr', '/tmp'])
-
-        After this the child application will be created and will be ready to
-        talk to. For normal use, see expect() and send() and sendline().
-
-        Remember that Pexpect does NOT interpret shell meta characters such as
-        redirect, pipe, or wild cards (>, |, or *). This is a common mistake.
-        If you want to run a command and pipe it through another command then
-        you must also start a shell. For example::
-
-            child = pexpect.spawn('/bin/bash -c "ls -l | grep LOG > log_list.txt"')
-            child.expect(pexpect.EOF)
-
-        The second form of spawn (where you pass a list of arguments) is useful
-        in situations where you wish to spawn a command and pass it its own
-        argument list. This can make syntax more clear. For example, the
-        following is equivalent to the previous example::
-
-            shell_cmd = 'ls -l | grep LOG > log_list.txt'
-            child = pexpect.spawn('/bin/bash', ['-c', shell_cmd])
-            child.expect(pexpect.EOF)
-
-        The maxread attribute sets the read buffer size. This is the maximum number
-        of bytes that Pexpect will try to read from a TTY at one time. Setting
-        the maxread size to 1 will turn off buffering. Setting the maxread
-        value higher may help performance in cases where large amounts of
-        output are read back from the child. This feature is useful in
-        conjunction with searchwindowsize.
-
-        The searchwindowsize attribute sets how far back in the incoming
-        search buffer Pexpect will search for pattern matches. Every time
-        Pexpect reads some data from the child it will append the data to the
-        incoming buffer. The default is to search from the beginning of the
-        incoming buffer each time new data is read from the child. But this is
-        very inefficient if you are running a command that generates a large
-        amount of output before the text you want to match. The
-        searchwindowsize does not affect the size of the incoming data buffer.
-        You will still have access to the full buffer after expect() returns.
-
-        The logfile member turns on or off logging. All input and output will
-        be copied to the given file object. Set logfile to None to stop
-        logging. This is the default. Set logfile to sys.stdout to echo
-        everything to standard output. The logfile is flushed after each write.
-
-        Example log input and output to a file::
-
-            child = pexpect.spawn('some_command')
-            fout = file('mylog.txt','w')
-            child.logfile = fout
-
-        Example log to stdout::
-
-            child = pexpect.spawn('some_command')
-            child.logfile = sys.stdout
-
-        The logfile_read and logfile_send members can be used to separately log
-        the input from the child and output sent to the child. Sometimes you
-        don't want to see everything you write to the child. You only want to
-        log what the child sends back. For example::
-        
-            child = pexpect.spawn('some_command')
-            child.logfile_read = sys.stdout
-
-        To separately log output sent to the child use logfile_send::
-        
-            self.logfile_send = fout
-
-        The delaybeforesend helps overcome a weird behavior that many users
-        were experiencing. The typical problem was that a user would expect() a
-        "Password:" prompt and then immediately call sendline() to send the
-        password. The user would then see that their password was echoed back
-        to them. Passwords don't normally echo. The problem is caused by the
-        fact that most applications print out the "Password" prompt and then
-        turn off stdin echo, but if you send your password before the
-        application turned off echo, then you get your password echoed.
-        Normally this wouldn't be a problem when interacting with a human at a
-        real keyboard. If you introduce a slight delay just before writing then
-        this seems to clear up the problem. This was such a common problem for
-        many users that I decided that the default pexpect behavior should be
-        to sleep just before writing to the child application. 1/20th of a
-        second (50 ms) seems to be enough to clear up the problem. You can set
-        delaybeforesend to 0 to return to the old behavior. Most Linux machines
-        don't like this to be below 0.03. I don't know why.
-
-        Note that spawn is clever about finding commands on your path.
-        It uses the same logic that "which" uses to find executables.
-
-        If you wish to get the exit status of the child you must call the
-        close() method. The exit or signal status of the child will be stored
-        in self.exitstatus or self.signalstatus. If the child exited normally
-        then exitstatus will store the exit return code and signalstatus will
-        be None. If the child was terminated abnormally with a signal then
-        signalstatus will store the signal value and exitstatus will be None.
-        If you need more detail you can also read the self.status member which
-        stores the status returned by os.waitpid. You can interpret this using
-        os.WIFEXITED/os.WEXITSTATUS or os.WIFSIGNALED/os.WTERMSIG. (A short
-        sketch of reading the exit status follows this docstring.) """
-
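-        # Editor's note -- an illustrative sketch (not original code) of reading
-        # the exit status described in the docstring above; the command is
-        # arbitrary.
-        #
-        #     child = pexpect.spawn('ls /tmp')
-        #     child.expect(pexpect.EOF)    # drain all output first
-        #     child.close()
-        #     print child.exitstatus, child.signalstatus
-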
-        self.STDIN_FILENO = pty.STDIN_FILENO
-        self.STDOUT_FILENO = pty.STDOUT_FILENO
-        self.STDERR_FILENO = pty.STDERR_FILENO
-        self.stdin = sys.stdin
-        self.stdout = sys.stdout
-        self.stderr = sys.stderr
-
-        self.searcher = None
-        self.ignorecase = False
-        self.before = None
-        self.after = None
-        self.match = None
-        self.match_index = None
-        self.terminated = True
-        self.exitstatus = None
-        self.signalstatus = None
-        self.status = None # status returned by os.waitpid
-        self.flag_eof = False
-        self.pid = None
-        self.child_fd = -1 # initially closed
-        self.timeout = timeout
-        self.delimiter = EOF
-        self.logfile = logfile
-        self.logfile_read = None # input from child (read_nonblocking)
-        self.logfile_send = None # output to send (send, sendline)
-        self.maxread = maxread # max bytes to read at one time into buffer
-        self.buffer = '' # This is the read buffer. See maxread.
-        self.searchwindowsize = searchwindowsize # Anything before searchwindowsize point is preserved, but not searched.
-        # Most Linux machines don't like delaybeforesend to be below 0.03 (30 ms).
-        self.delaybeforesend = 0.05 # Sets sleep time used just before sending data to child. Time in seconds.
-        self.delayafterclose = 0.1 # Sets delay in close() method to allow kernel time to update process status. Time in seconds.
-        self.delayafterterminate = 0.1 # Sets delay in terminate() method to allow kernel time to update process status. Time in seconds.
-        self.softspace = False # File-like object.
-        self.name = '<' + repr(self) + '>' # File-like object.
-        self.encoding = None # File-like object.
-        self.closed = True # File-like object.
-        self.cwd = cwd
-        self.env = env
-        self.__irix_hack = (sys.platform.lower().find('irix')>=0) # This flags if we are running on irix
-        # Solaris uses internal __fork_pty(). All others use pty.fork().
-        if (sys.platform.lower().find('solaris')>=0) or (sys.platform.lower().find('sunos5')>=0):
-            self.use_native_pty_fork = False
-        else:
-            self.use_native_pty_fork = True
-
-
-        # allow dummy instances for subclasses that may not use command or args.
-        if command is None:
-            self.command = None
-            self.args = None
-            self.name = '<pexpect factory incomplete>'
-        else:
-            self._spawn (command, args)
-
-    def __del__(self):
-
-        """This makes sure that no system resources are left open. Python only
-        garbage collects Python objects. OS file descriptors are not Python
-        objects, so they must be handled explicitly. If the child file
-        descriptor was opened outside of this class (passed to the constructor)
-        then this does not close it. """
-
-        if not self.closed:
-            # It is possible for __del__ methods to execute during the
-            # teardown of the Python VM itself. Thus self.close() may
-            # trigger an exception because os.close may be None.
-            # -- Fernando Perez
-            try:
-                self.close()
-            except AttributeError:
-                pass
-
-    def __str__(self):
-
-        """This returns a human-readable string that represents the state of
-        the object. """
-
-        s = []
-        s.append(repr(self))
-        s.append('version: ' + __version__ + ' (' + __revision__ + ')')
-        s.append('command: ' + str(self.command))
-        s.append('args: ' + str(self.args))
-        s.append('searcher: ' + str(self.searcher))
-        s.append('buffer (last 100 chars): ' + str(self.buffer)[-100:])
-        s.append('before (last 100 chars): ' + str(self.before)[-100:])
-        s.append('after: ' + str(self.after))
-        s.append('match: ' + str(self.match))
-        s.append('match_index: ' + str(self.match_index))
-        s.append('exitstatus: ' + str(self.exitstatus))
-        s.append('flag_eof: ' + str(self.flag_eof))
-        s.append('pid: ' + str(self.pid))
-        s.append('child_fd: ' + str(self.child_fd))
-        s.append('closed: ' + str(self.closed))
-        s.append('timeout: ' + str(self.timeout))
-        s.append('delimiter: ' + str(self.delimiter))
-        s.append('logfile: ' + str(self.logfile))
-        s.append('logfile_read: ' + str(self.logfile_read))
-        s.append('logfile_send: ' + str(self.logfile_send))
-        s.append('maxread: ' + str(self.maxread))
-        s.append('ignorecase: ' + str(self.ignorecase))
-        s.append('searchwindowsize: ' + str(self.searchwindowsize))
-        s.append('delaybeforesend: ' + str(self.delaybeforesend))
-        s.append('delayafterclose: ' + str(self.delayafterclose))
-        s.append('delayafterterminate: ' + str(self.delayafterterminate))
-        return '\n'.join(s)
-
-    def _spawn(self,command,args=[]):
-
-        """This starts the given command in a child process. This does all the
-        fork/exec type of stuff for a pty. This is called by __init__. If args
-        is empty then command will be parsed (split on spaces) and args will be
-        set to parsed arguments. """
-
-        # The pid and child_fd of this object get set by this method.
-        # Note that it is difficult for this method to fail.
-        # You cannot detect if the child process cannot start.
-        # So the only way you can tell if the child process started
-        # or not is to try to read from the file descriptor. If you get
-        # EOF immediately then it means that the child is already dead.
-        # That may not necessarily be bad because you may have spawned a child
-        # that performs some task; creates no stdout output; and then dies.
-
-        # If command is an int type then it may represent a file descriptor.
-        if type(command) == type(0):
-            raise ExceptionPexpect ('Command is an int type. If this is a file descriptor then maybe you want to use fdpexpect.fdspawn which takes an existing file descriptor instead of a command string.')
-
-        if type (args) != type([]):
-            raise TypeError ('The argument, args, must be a list.')
-
-        if args == []:
-            self.args = split_command_line(command)
-            self.command = self.args[0]
-        else:
-            self.args = args[:] # work with a copy
-            self.args.insert (0, command)
-            self.command = command
-
-        command_with_path = which(self.command)
-        if command_with_path is None:
-            raise ExceptionPexpect ('The command was not found or was not executable: %s.' % self.command)
-        self.command = command_with_path
-        self.args[0] = self.command
-
-        self.name = '<' + ' '.join (self.args) + '>'
-
-        assert self.pid is None, 'The pid member should be None.'
-        assert self.command is not None, 'The command member should not be None.'
-
-        if self.use_native_pty_fork:
-            try:
-                self.pid, self.child_fd = pty.fork()
-            except OSError, e:
-                raise ExceptionPexpect('Error! pty.fork() failed: ' + str(e))
-        else: # Use internal __fork_pty
-            self.pid, self.child_fd = self.__fork_pty()
-
-        if self.pid == 0: # Child
-            try:
-                self.child_fd = sys.stdout.fileno() # used by setwinsize()
-                self.setwinsize(24, 80)
-            except:
-                # Some platforms do not like setwinsize (Cygwin).
-                # This will cause problem when running applications that
-                # are very picky about window size.
-                # This is a serious limitation, but not a show stopper.
-                pass
-            # Do not allow child to inherit open file descriptors from parent.
-            max_fd = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
-            for i in range (3, max_fd):
-                try:
-                    os.close (i)
-                except OSError:
-                    pass
-
-            # I don't know why this works, but ignoring SIGHUP fixes a
-            # problem when trying to start a Java daemon with sudo
-            # (specifically, Tomcat).
-            signal.signal(signal.SIGHUP, signal.SIG_IGN)
-
-            if self.cwd is not None:
-                os.chdir(self.cwd)
-            if self.env is None:
-                os.execv(self.command, self.args)
-            else:
-                os.execvpe(self.command, self.args, self.env)
-
-        # Parent
-        self.terminated = False
-        self.closed = False
-
-    def __fork_pty(self):
-
-        """This implements a substitute for the forkpty system call. This
-        should be more portable than the pty.fork() function. Specifically,
-        this should work on Solaris.
-
-        Modified 10.06.05 by Geoff Marshall: Implemented __fork_pty() method to
-        resolve the issue with Python's pty.fork() not supporting Solaris,
-        particularly ssh. Based on patch to posixmodule.c authored by Noah
-        Spurrier::
-
-            http://mail.python.org/pipermail/python-dev/2003-May/035281.html
-
-        """
-
-        parent_fd, child_fd = os.openpty()
-        if parent_fd < 0 or child_fd < 0:
-            raise ExceptionPexpect, "Error! Could not open pty with os.openpty()."
-
-        pid = os.fork()
-        if pid < 0:
-            raise ExceptionPexpect, "Error! Failed os.fork()."
-        elif pid == 0:
-            # Child.
-            os.close(parent_fd)
-            self.__pty_make_controlling_tty(child_fd)
-
-            os.dup2(child_fd, 0)
-            os.dup2(child_fd, 1)
-            os.dup2(child_fd, 2)
-
-            if child_fd > 2:
-                os.close(child_fd)
-        else:
-            # Parent.
-            os.close(child_fd)
-
-        return pid, parent_fd
-
-    def __pty_make_controlling_tty(self, tty_fd):
-
-        """This makes the pseudo-terminal the controlling tty. This should be
-        more portable than the pty.fork() function. Specifically, this should
-        work on Solaris. """
-
-        child_name = os.ttyname(tty_fd)
-
-        # Disconnect from controlling tty if still connected.
-        fd = os.open("/dev/tty", os.O_RDWR | os.O_NOCTTY);
-        if fd >= 0:
-            os.close(fd)
-
-        os.setsid()
-
-        # Verify we are disconnected from controlling tty
-        try:
-            fd = os.open("/dev/tty", os.O_RDWR | os.O_NOCTTY);
-            if fd >= 0:
-                os.close(fd)
-                raise ExceptionPexpect, "Error! We are not disconnected from a controlling tty."
-        except:
-            # Good! We are disconnected from a controlling tty.
-            pass
-
-        # Verify we can open child pty.
-        fd = os.open(child_name, os.O_RDWR);
-        if fd < 0:
-            raise ExceptionPexpect, "Error! Could not open child pty, " + child_name
-        else:
-            os.close(fd)
-
-        # Verify we now have a controlling tty.
-        fd = os.open("/dev/tty", os.O_WRONLY)
-        if fd < 0:
-            raise ExceptionPexpect, "Error! Could not open controlling tty, /dev/tty"
-        else:
-            os.close(fd)
-
-    def fileno (self):   # File-like object.
-
-        """This returns the file descriptor of the pty for the child.
-        """
-
-        return self.child_fd
-
-    def close (self, force=True):   # File-like object.
-
-        """This closes the connection with the child application. Note that
-        calling close() more than once is valid. This emulates standard Python
-        behavior with files. Set force to True if you want to make sure that
-        the child is terminated (SIGKILL is sent if the child ignores SIGHUP
-        and SIGINT). """
-
-        if not self.closed:
-            self.flush()
-            os.close (self.child_fd)
-            time.sleep(self.delayafterclose) # Give kernel time to update process status.
-            if self.isalive():
-                if not self.terminate(force):
-                    raise ExceptionPexpect ('close() could not terminate the child using terminate()')
-            self.child_fd = -1
-            self.closed = True
-            #self.pid = None
-
-    def flush (self):   # File-like object.
-
-        """This does nothing. It is here to support the interface for a
-        File-like object. """
-
-        pass
-
-    def isatty (self):   # File-like object.
-
-        """This returns True if the file descriptor is open and connected to a
-        tty(-like) device, else False. """
-
-        return os.isatty(self.child_fd)
-
-    def waitnoecho (self, timeout=-1):
-
-        """This waits until the terminal ECHO flag is set False. This returns
-        True if the echo mode is off. This returns False if the ECHO flag was
-        not set False before the timeout. This can be used to detect when the
-        child is waiting for a password. Usually a child application will turn
-        off echo mode when it is waiting for the user to enter a password. For
-        example, instead of expecting the "password:" prompt you can wait for
-        the child to set ECHO off::
-
-            p = pexpect.spawn ('ssh user at example.com')
-            p.waitnoecho()
-            p.sendline(mypassword)
-
-        If timeout is None then this method will block until the ECHO flag is
-        False.
-
-        """
-
-        if timeout == -1:
-            timeout = self.timeout
-        if timeout is not None:
-            end_time = time.time() + timeout 
-        while True:
-            if not self.getecho():
-                return True
-            if timeout < 0 and timeout is not None:
-                return False
-            if timeout is not None:
-                timeout = end_time - time.time()
-            time.sleep(0.1)
-
-    def getecho (self):
-
-        """This returns the terminal echo mode. This returns True if echo is
-        on or False if echo is off. Child applications that are expecting you
-        to enter a password often set ECHO False. See waitnoecho(). """
-
-        attr = termios.tcgetattr(self.child_fd)
-        if attr[3] & termios.ECHO:
-            return True
-        return False
-
-    def setecho (self, state):
-
-        """This sets the terminal echo mode on or off. Note that anything the
-        child sent before the echo will be lost, so you should be sure that
-        your input buffer is empty before you call setecho(). For example, the
-        following will work as expected::
-
-            p = pexpect.spawn('cat')
-            p.sendline ('1234') # We will see this twice (once from tty echo and again from cat).
-            p.expect (['1234'])
-            p.expect (['1234'])
-            p.setecho(False) # Turn off tty echo
-            p.sendline ('abcd') # We will see this only once (echoed by cat).
-            p.sendline ('wxyz') # We will see this only once (echoed by cat).
-            p.expect (['abcd'])
-            p.expect (['wxyz'])
-
-        The following WILL NOT WORK because the lines sent before the setecho
-        will be lost::
-
-            p = pexpect.spawn('cat')
-            p.sendline ('1234') # We will see this twice (once from tty echo and again from cat).
-            p.setecho(False) # Turn off tty echo
-            p.sendline ('abcd') # We will see this only once (echoed by cat).
-            p.sendline ('wxyz') # We will see this only once (echoed by cat).
-            p.expect (['1234'])
-            p.expect (['1234'])
-            p.expect (['abcd'])
-            p.expect (['wxyz'])
-        """
-
-        self.child_fd
-        attr = termios.tcgetattr(self.child_fd)
-        if state:
-            attr[3] = attr[3] | termios.ECHO
-        else:
-            attr[3] = attr[3] & ~termios.ECHO
-        # I tried TCSADRAIN and TCSAFLUSH, but these were inconsistent
-        # and blocked on some platforms. TCSADRAIN is probably ideal if it worked.
-        termios.tcsetattr(self.child_fd, termios.TCSANOW, attr)
-
-    def read_nonblocking (self, size = 1, timeout = -1):
-
-        """This reads at most size characters from the child application. It
-        includes a timeout. If the read does not complete within the timeout
-        period then a TIMEOUT exception is raised. If the end of file is read
-        then an EOF exception will be raised. If a log file was set using
-        setlog() then all data will also be written to the log file.
-
-        If timeout is None then the read may block indefinitely. If timeout is -1
-        then the self.timeout value is used. If timeout is 0 then the child is
-        polled and if there was no data immediately ready then this will raise
-        a TIMEOUT exception.
-
-        The timeout refers only to the amount of time to read at least one
-        character. This is not affected by the 'size' parameter, so if you call
-        read_nonblocking(size=100, timeout=30) and only one character is
-        available right away then one character will be returned immediately.
-        It will not wait for 30 seconds for another 99 characters to come in.
-
-        This is a wrapper around os.read(). It uses select.select() to
-        implement the timeout. """
-
-        if self.closed:
-            raise ValueError ('I/O operation on closed file in read_nonblocking().')
-
-        if timeout == -1:
-            timeout = self.timeout
-
-        # Note that some systems such as Solaris do not give an EOF when
-        # the child dies. In fact, you can still try to read
-        # from the child_fd -- it will block forever or until TIMEOUT.
-        # For this case, I test isalive() before doing any reading.
-        # If isalive() is false, then I pretend that this is the same as EOF.
-        if not self.isalive():
-            r,w,e = self.__select([self.child_fd], [], [], 0) # timeout of 0 means "poll"
-            if not r:
-                self.flag_eof = True
-                raise EOF ('End Of File (EOF) in read_nonblocking(). Braindead platform.')
-        elif self.__irix_hack:
-            # This is a hack for Irix. It seems that Irix requires a long delay before checking isalive.
-            # This adds a 2 second delay, but only when the child is terminated.
-            r, w, e = self.__select([self.child_fd], [], [], 2)
-            if not r and not self.isalive():
-                self.flag_eof = True
-                raise EOF ('End Of File (EOF) in read_nonblocking(). Pokey platform.')
-
-        r,w,e = self.__select([self.child_fd], [], [], timeout)
-
-        if not r:
-            if not self.isalive():
-                # Some platforms, such as Irix, will claim that their processes are alive;
-                # then timeout on the select; and then finally admit that they are not alive.
-                self.flag_eof = True
-                raise EOF ('End of File (EOF) in read_nonblocking(). Very pokey platform.')
-            else:
-                raise TIMEOUT ('Timeout exceeded in read_nonblocking().')
-
-        if self.child_fd in r:
-            try:
-                s = os.read(self.child_fd, size)
-            except OSError, e: # Linux does this
-                self.flag_eof = True
-                raise EOF ('End Of File (EOF) in read_nonblocking(). Exception style platform.')
-            if s == '': # BSD style
-                self.flag_eof = True
-                raise EOF ('End Of File (EOF) in read_nonblocking(). Empty string style platform.')
-
-            if self.logfile is not None:
-                self.logfile.write (s)
-                self.logfile.flush()
-            if self.logfile_read is not None:
-                self.logfile_read.write (s)
-                self.logfile_read.flush()
-
-            return s
-
-        raise ExceptionPexpect ('Reached an unexpected state in read_nonblocking().')
-
-    def read (self, size = -1):   # File-like object.
-
-        """This reads at most "size" bytes from the file (less if the read hits
-        EOF before obtaining size bytes). If the size argument is negative or
-        omitted, read all data until EOF is reached. The bytes are returned as
-        a string object. An empty string is returned when EOF is encountered
-        immediately. """
-
-        if size == 0:
-            return ''
-        if size < 0:
-            self.expect (self.delimiter) # delimiter default is EOF
-            return self.before
-
-        # I could have done this more directly by not using expect(), but
-        # I deliberately decided to couple read() to expect() so that
-        # I would catch any bugs early and ensure consistent behavior.
-        # It's a little less efficient, but there is less for me to
-        # worry about if I have to later modify read() or expect().
-        # Note, it's OK if size==-1 in the regex. That just means it
-        # will never match anything in which case we stop only on EOF.
-        cre = re.compile('.{%d}' % size, re.DOTALL)
-        index = self.expect ([cre, self.delimiter]) # delimiter default is EOF
-        if index == 0:
-            return self.after ### self.before should be ''. Should I assert this?
-        return self.before
-
-    def readline (self, size = -1):    # File-like object.
-
-        """This reads and returns one entire line. A trailing newline is kept
-        in the string, but may be absent when a file ends with an incomplete
-        line. Note: This readline() looks for a \\r\\n pair even on UNIX
-        because this is what the pseudo tty device returns. So contrary to what
-        you may expect you will receive the newline as \\r\\n. An empty string
-        is returned when EOF is hit immediately. Currently, the size argument is
-        mostly ignored, so this behavior is not standard for a file-like
-        object. If size is 0 then an empty string is returned. """
-
-        if size == 0:
-            return ''
-        index = self.expect (['\r\n', self.delimiter]) # delimiter default is EOF
-        if index == 0:
-            return self.before + '\r\n'
-        else:
-            return self.before
-
-    def __iter__ (self):    # File-like object.
-
-        """This is to support iterators over a file-like object.
-        """
-
-        return self
-
-    def next (self):    # File-like object.
-
-        """This is to support iterators over a file-like object.
-        """
-
-        result = self.readline()
-        if result == "":
-            raise StopIteration
-        return result
-
-    def readlines (self, sizehint = -1):    # File-like object.
-
-        """This reads until EOF using readline() and returns a list containing
-        the lines thus read. The optional "sizehint" argument is ignored. """
-
-        lines = []
-        while True:
-            line = self.readline()
-            if not line:
-                break
-            lines.append(line)
-        return lines
-
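-    # Editor's note -- an illustrative sketch (not original code): because
-    # readline(), __iter__() and next() are defined, a spawn instance can be
-    # iterated line by line until EOF, like a file.  The command is arbitrary.
-    #
-    #     child = pexpect.spawn('ls -1 /tmp')
-    #     for line in child:
-    #         print line.rstrip()
-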
-    def write(self, s):   # File-like object.
-
-        """This is similar to send() except that there is no return value.
-        """
-
-        self.send (s)
-
-    def writelines (self, sequence):   # File-like object.
-
-        """This calls write() for each element in the sequence. The sequence
-        can be any iterable object producing strings, typically a list of
-        strings. This does not add line separators. There is no return value.
-        """
-
-        for s in sequence:
-            self.write (s)
-
-    def send(self, s):
-
-        """This sends a string to the child process. This returns the number of
-        bytes written. If a log file was set then the data is also written to
-        the log. """
-
-        time.sleep(self.delaybeforesend)
-        if self.logfile is not None:
-            self.logfile.write (s)
-            self.logfile.flush()
-        if self.logfile_send is not None:
-            self.logfile_send.write (s)
-            self.logfile_send.flush()
-        c = os.write(self.child_fd, s)
-        return c
-
-    def sendline(self, s=''):
-
-        """This is like send(), but it adds a line feed (os.linesep). This
-        returns the number of bytes written. """
-
-        n = self.send(s)
-        n = n + self.send (os.linesep)
-        return n
-
-    def sendcontrol(self, char):
-
-        """This sends a control character to the child such as Ctrl-C or
-        Ctrl-D. For example, to send a Ctrl-G (ASCII 7)::
-
-            child.sendcontrol('g')
-
-        See also, sendintr() and sendeof().
-        """
-
-        char = char.lower()
-        a = ord(char)
-        if a>=97 and a<=122:
-            a = a - ord('a') + 1
-            return self.send (chr(a))
-        d = {'@':0, '`':0,
-            '[':27, '{':27,
-            '\\':28, '|':28,
-            ']':29, '}': 29,
-            '^':30, '~':30,
-            '_':31,
-            '?':127}
-        if char not in d:
-            return 0
-        return self.send (chr(d[char]))
-
-    def sendeof(self):
-
-        """This sends an EOF to the child. This sends a character which causes
-        the pending parent output buffer to be sent to the waiting child
-        program without waiting for end-of-line. If it is the first character
-        of the line, the read() in the user program returns 0, which signifies
-        end-of-file. This means that, to work as expected, sendeof() has to be
-        called at the beginning of a line. This method does not send a newline.
-        It is the responsibility of the caller to ensure the eof is sent at the
-        beginning of a line. """
-
-        ### Hmmm... how do I send an EOF?
-        ###C  if ((m = write(pty, *buf, p - *buf)) < 0)
-        ###C      return (errno == EWOULDBLOCK) ? n : -1;
-        #fd = sys.stdin.fileno()
-        #old = termios.tcgetattr(fd) # remember current state
-        #attr = termios.tcgetattr(fd)
-        #attr[3] = attr[3] | termios.ICANON # ICANON must be set to recognize EOF
-        #try: # use try/finally to ensure state gets restored
-        #    termios.tcsetattr(fd, termios.TCSADRAIN, attr)
-        #    if hasattr(termios, 'CEOF'):
-        #        os.write (self.child_fd, '%c' % termios.CEOF)
-        #    else:
-        #        # Silly platform does not define CEOF so assume CTRL-D
-        #        os.write (self.child_fd, '%c' % 4)
-        #finally: # restore state
-        #    termios.tcsetattr(fd, termios.TCSADRAIN, old)
-        if hasattr(termios, 'VEOF'):
-            char = termios.tcgetattr(self.child_fd)[6][termios.VEOF]
-        else:
-            # platform does not define VEOF so assume CTRL-D
-            char = chr(4)
-        self.send(char)
-
-    def sendintr(self):
-
-        """This sends a SIGINT to the child. It does not require
-        the SIGINT to be the first character on a line. """
-
-        if hasattr(termios, 'VINTR'):
-            char = termios.tcgetattr(self.child_fd)[6][termios.VINTR]
-        else:
-            # platform does not define VINTR so assume CTRL-C
-            char = chr(3)
-        self.send (char)
-
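-    # Editor's note -- an illustrative sketch (not original code) of sendeof()
-    # as described above; the command is arbitrary.  Note that the echoed input
-    # appears in child.before along with wc's count.
-    #
-    #     child = pexpect.spawn('wc -l')
-    #     child.sendline('one line of input')
-    #     child.sendeof()                  # EOF at the start of a line
-    #     child.expect(pexpect.EOF)
-    #     print child.before
-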
-    def eof (self):
-
-        """This returns True if the EOF exception was ever raised.
-        """
-
-        return self.flag_eof
-
-    def terminate(self, force=False):
-
-        """This forces a child process to terminate. It starts nicely with
-        SIGHUP and SIGINT. If "force" is True then moves onto SIGKILL. This
-        returns True if the child was terminated. This returns False if the
-        child could not be terminated. """
-
-        if not self.isalive():
-            return True
-        try:
-            self.kill(signal.SIGHUP)
-            time.sleep(self.delayafterterminate)
-            if not self.isalive():
-                return True
-            self.kill(signal.SIGCONT)
-            time.sleep(self.delayafterterminate)
-            if not self.isalive():
-                return True
-            self.kill(signal.SIGINT)
-            time.sleep(self.delayafterterminate)
-            if not self.isalive():
-                return True
-            if force:
-                self.kill(signal.SIGKILL)
-                time.sleep(self.delayafterterminate)
-                if not self.isalive():
-                    return True
-                else:
-                    return False
-            return False
-        except OSError, e:
-            # I think there are kernel timing issues that sometimes cause
-            # this to happen. I think isalive() reports True, but the
-            # process is dead to the kernel.
-            # Make one last attempt to see if the kernel is up to date.
-            time.sleep(self.delayafterterminate)
-            if not self.isalive():
-                return True
-            else:
-                return False
-
-    def wait(self):
-
-        """This waits until the child exits. This is a blocking call. This will
-        not read any data from the child, so this will block forever if the
-        child has unread output and has terminated. In other words, the child
-        may have printed output then called exit(); but, technically, the child
-        is still alive until its output is read. """
-
-        if self.isalive():
-            pid, status = os.waitpid(self.pid, 0)
-        else:
-            raise ExceptionPexpect ('Cannot wait for dead child process.')
-        self.exitstatus = os.WEXITSTATUS(status)
-        if os.WIFEXITED (status):
-            self.status = status
-            self.exitstatus = os.WEXITSTATUS(status)
-            self.signalstatus = None
-            self.terminated = True
-        elif os.WIFSIGNALED (status):
-            self.status = status
-            self.exitstatus = None
-            self.signalstatus = os.WTERMSIG(status)
-            self.terminated = True
-        elif os.WIFSTOPPED (status):
-            raise ExceptionPexpect ('Wait was called for a child process that is stopped. This is not supported. Is some other process attempting job control with our child pid?')
-        return self.exitstatus
-
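-    # Editor's note -- an illustrative sketch (not original code): wait() is
-    # safe here because the child produces no output that could go unread.
-    #
-    #     child = pexpect.spawn('sleep 2')
-    #     status = child.wait()            # blocks until the child exits
-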
-    def isalive(self):
-
-        """This tests if the child process is running or not. This is
-        non-blocking. If the child was terminated then this will read the
-        exitstatus or signalstatus of the child. This returns True if the child
-        process appears to be running or False if not. It can take literally
-        SECONDS for Solaris to return the right status. """
-
-        if self.terminated:
-            return False
-
-        if self.flag_eof:
-            # This is for Linux, which requires the blocking form of waitpid to get
-            # status of a defunct process. This is super-lame. The flag_eof would have
-            # been set in read_nonblocking(), so this should be safe.
-            waitpid_options = 0
-        else:
-            waitpid_options = os.WNOHANG
-
-        try:
-            pid, status = os.waitpid(self.pid, waitpid_options)
-        except OSError, e: # No child processes
-            if e[0] == errno.ECHILD:
-                raise ExceptionPexpect ('isalive() encountered condition where "terminated" is 0, but there was no child process. Did someone else call waitpid() on our process?')
-            else:
-                raise e
-
-        # I have to do this twice for Solaris. I can't even believe that I figured this out...
-        # If waitpid() returns 0 it means that no child process wishes to
-        # report, and the value of status is undefined.
-        if pid == 0:
-            try:
-                pid, status = os.waitpid(self.pid, waitpid_options) ### os.WNOHANG) # Solaris!
-            except OSError, e: # This should never happen...
-                if e[0] == errno.ECHILD:
-                    raise ExceptionPexpect ('isalive() encountered condition that should never happen. There was no child process. Did someone else call waitpid() on our process?')
-                else:
-                    raise e
-
-            # If pid is still 0 after two calls to waitpid() then
-            # the process really is alive. This seems to work on all platforms, except
-            # for Irix which seems to require a blocking call on waitpid or select, so I let read_nonblocking
-            # take care of this situation (unfortunately, this requires waiting through the timeout).
-            if pid == 0:
-                return True
-
-        if pid == 0:
-            return True
-
-        if os.WIFEXITED (status):
-            self.status = status
-            self.exitstatus = os.WEXITSTATUS(status)
-            self.signalstatus = None
-            self.terminated = True
-        elif os.WIFSIGNALED (status):
-            self.status = status
-            self.exitstatus = None
-            self.signalstatus = os.WTERMSIG(status)
-            self.terminated = True
-        elif os.WIFSTOPPED (status):
-            raise ExceptionPexpect ('isalive() encountered condition where child process is stopped. This is not supported. Is some other process attempting job control with our child pid?')
-        return False
-
-    def kill(self, sig):
-
-        """This sends the given signal to the child application. In keeping
-        with UNIX tradition it has a misleading name. It does not necessarily
-        kill the child unless you send the right signal. """
-
-        # Same as os.kill, but the pid is given for you.
-        if self.isalive():
-            os.kill(self.pid, sig)
-
-    def compile_pattern_list(self, patterns):
-
-        """This compiles a pattern-string or a list of pattern-strings.
-        Patterns must be a StringType, EOF, TIMEOUT, SRE_Pattern, or a list of
-        those. Patterns may also be None which results in an empty list (you
-        might do this if waiting for an EOF or TIMEOUT condition without
-        expecting any pattern).
-
-        This is used by expect() when calling expect_list(). Thus expect() is
-        nothing more than::
-
-             cpl = self.compile_pattern_list(pl)
-             return self.expect_list(cpl, timeout)
-
-        If you are using expect() within a loop it may be more
-        efficient to compile the patterns first and then call expect_list().
-        This avoids calls in a loop to compile_pattern_list()::
-
-             cpl = self.compile_pattern_list(my_pattern)
-             while some_condition:
-                ...
-                i = self.expect_list(cpl, timeout)
-                ...
-        """
-
-        if patterns is None:
-            return []
-        if type(patterns) is not types.ListType:
-            patterns = [patterns]
-
-        compile_flags = re.DOTALL # Allow dot to match \n
-        if self.ignorecase:
-            compile_flags = compile_flags | re.IGNORECASE
-        compiled_pattern_list = []
-        for p in patterns:
-            if type(p) in types.StringTypes:
-                compiled_pattern_list.append(re.compile(p, compile_flags))
-            elif p is EOF:
-                compiled_pattern_list.append(EOF)
-            elif p is TIMEOUT:
-                compiled_pattern_list.append(TIMEOUT)
-            elif type(p) is type(re.compile('')):
-                compiled_pattern_list.append(p)
-            else:
-                raise TypeError ('Argument must be one of StringTypes, EOF, TIMEOUT, SRE_Pattern, or a list of those type. %s' % str(type(p)))
-
-        return compiled_pattern_list
-
-    def expect(self, pattern, timeout = -1, searchwindowsize=None):
-
-        """This seeks through the stream until a pattern is matched. The
-        pattern is overloaded and may take several types. The pattern can be a
-        StringType, EOF, a compiled re, or a list of any of those types.
-        Strings will be compiled to re types. This returns the index into the
-        pattern list. If the pattern was not a list this returns index 0 on a
-        successful match. This may raise exceptions for EOF or TIMEOUT. To
-        avoid the EOF or TIMEOUT exceptions add EOF or TIMEOUT to the pattern
-        list. That will cause expect to match an EOF or TIMEOUT condition
-        instead of raising an exception.
-
-        If you pass a list of patterns and more than one matches, the first match
-        in the stream is chosen. If more than one pattern matches at that point,
-        the leftmost in the pattern list is chosen. For example::
-
-            # the input is 'foobar'
-            index = p.expect (['bar', 'foo', 'foobar'])
-            # returns 1 ('foo') even though 'foobar' is a "better" match
-
-        Please note, however, that buffering can affect this behavior, since
-        input arrives in unpredictable chunks. For example::
-
-            # the input is 'foobar'
-            index = p.expect (['foobar', 'foo'])
-            # returns 0 ('foobar') if all input is available at once,
-            # but returns 1 ('foo') if parts of the final 'bar' arrive late
-
-        After a match is found the instance attributes 'before', 'after' and
-        'match' will be set. You can see all the data read before the match in
-        'before'. You can see the data that was matched in 'after'. The
-        re.MatchObject used in the re match will be in 'match'. If an error
-        occurred then 'before' will be set to all the data read so far and
-        'after' and 'match' will be None.
-
-        If timeout is -1 then timeout will be set to the self.timeout value.
-
-        A list entry may be EOF or TIMEOUT instead of a string. This will
-        catch these exceptions and return the index of the list entry instead
-        of raising the exception. The attribute 'after' will be set to the
-        exception type. The attribute 'match' will be None. This allows you to
-        write code like this::
-
-                index = p.expect (['good', 'bad', pexpect.EOF, pexpect.TIMEOUT])
-                if index == 0:
-                    do_something()
-                elif index == 1:
-                    do_something_else()
-                elif index == 2:
-                    do_some_other_thing()
-                elif index == 3:
-                    do_something_completely_different()
-
-        instead of code like this::
-
-                try:
-                    index = p.expect (['good', 'bad'])
-                    if index == 0:
-                        do_something()
-                    elif index == 1:
-                        do_something_else()
-                except EOF:
-                    do_some_other_thing()
-                except TIMEOUT:
-                    do_something_completely_different()
-
-        These two forms are equivalent. It all depends on what you want. You
-        can also just expect the EOF if you are waiting for all output of a
-        child to finish. For example::
-
-                p = pexpect.spawn('/bin/ls')
-                p.expect (pexpect.EOF)
-                print p.before
-
-        If you are trying to optimize for speed then see expect_list().
-        """
-
-        compiled_pattern_list = self.compile_pattern_list(pattern)
-        return self.expect_list(compiled_pattern_list, timeout, searchwindowsize)
-
-    def expect_list(self, pattern_list, timeout = -1, searchwindowsize = -1):
-
-        """This takes a list of compiled regular expressions and returns the
-        index into the pattern_list that matched the child output. The list may
-        also contain EOF or TIMEOUT (which are not compiled regular
-        expressions). This method is similar to the expect() method except that
-        expect_list() does not recompile the pattern list on every call. This
-        may help if you are trying to optimize for speed, otherwise just use
-        the expect() method.  This is called by expect(). If timeout==-1 then
-        the self.timeout value is used. If searchwindowsize==-1 then the
-        self.searchwindowsize value is used. """
-
-        return self.expect_loop(searcher_re(pattern_list), timeout, searchwindowsize)
-
-    def expect_exact(self, pattern_list, timeout = -1, searchwindowsize = -1):
-
-        """This is similar to expect(), but uses plain string matching instead
-        of compiled regular expressions in 'pattern_list'. The 'pattern_list'
-        may be a string; a list or other sequence of strings; or TIMEOUT and
-        EOF.
-
-        This call might be faster than expect() for two reasons: string
-        searching is faster than RE matching and it is possible to limit the
-        search to just the end of the input buffer.
-
-        This method is also useful when you don't want to have to worry about
-        escaping regular expression characters that you want to match."""
-
-        if type(pattern_list) in types.StringTypes or pattern_list in (TIMEOUT, EOF):
-            pattern_list = [pattern_list]
-        return self.expect_loop(searcher_string(pattern_list), timeout, searchwindowsize)
-
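-    # Editor's note -- an illustrative sketch (not original code): expect_exact()
-    # treats patterns as literals, so regex metacharacters such as '$' need no
-    # escaping.  The shell and prompt string are arbitrary.
-    #
-    #     child = pexpect.spawn('/bin/sh')
-    #     child.expect_exact('$ ')         # literal match; expect() would need '\$ '
-    #     child.sendline('echo hello')
-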
-    def expect_loop(self, searcher, timeout = -1, searchwindowsize = -1):
-
-        """This is the common loop used inside expect. The 'searcher' should be
-        an instance of searcher_re or searcher_string, which describes how and what
-        to search for in the input.
-
-        See expect() for other arguments, return value and exceptions. """
-
-        self.searcher = searcher
-
-        if timeout == -1:
-            timeout = self.timeout
-        if timeout is not None:
-            end_time = time.time() + timeout 
-        if searchwindowsize == -1:
-            searchwindowsize = self.searchwindowsize
-
-        try:
-            incoming = self.buffer
-            freshlen = len(incoming)
-            while True: # Keep reading until exception or return.
-                index = searcher.search(incoming, freshlen, searchwindowsize)
-                if index >= 0:
-                    self.buffer = incoming[searcher.end : ]
-                    self.before = incoming[ : searcher.start]
-                    self.after = incoming[searcher.start : searcher.end]
-                    self.match = searcher.match
-                    self.match_index = index
-                    return self.match_index
-                # No match at this point
-                if timeout < 0 and timeout is not None:
-                    raise TIMEOUT ('Timeout exceeded in expect_any().')
-                # Still have time left, so read more data
-                c = self.read_nonblocking (self.maxread, timeout)
-                freshlen = len(c)
-                time.sleep (0.0001)
-                incoming = incoming + c
-                if timeout is not None:
-                    timeout = end_time - time.time()
-        except EOF, e:
-            self.buffer = ''
-            self.before = incoming
-            self.after = EOF
-            index = searcher.eof_index
-            if index >= 0:
-                self.match = EOF
-                self.match_index = index
-                return self.match_index
-            else:
-                self.match = None
-                self.match_index = None
-                raise EOF (str(e) + '\n' + str(self))
-        except TIMEOUT, e:
-            self.buffer = incoming
-            self.before = incoming
-            self.after = TIMEOUT
-            index = searcher.timeout_index
-            if index >= 0:
-                self.match = TIMEOUT
-                self.match_index = index
-                return self.match_index
-            else:
-                self.match = None
-                self.match_index = None
-                raise TIMEOUT (str(e) + '\n' + str(self))
-        except:
-            self.before = incoming
-            self.after = None
-            self.match = None
-            self.match_index = None
-            raise
-
-    def getwinsize(self):
-
-        """This returns the terminal window size of the child tty. The return
-        value is a tuple of (rows, cols). """
-
-        TIOCGWINSZ = getattr(termios, 'TIOCGWINSZ', 1074295912L)
-        s = struct.pack('HHHH', 0, 0, 0, 0)
-        x = fcntl.ioctl(self.fileno(), TIOCGWINSZ, s)
-        return struct.unpack('HHHH', x)[0:2]
-
-    def setwinsize(self, r, c):
-
-        """This sets the terminal window size of the child tty. This will cause
-        a SIGWINCH signal to be sent to the child. This does not change the
-        physical window size. It changes the size reported to TTY-aware
-        applications like vi or curses -- applications that respond to the
-        SIGWINCH signal. """
-
-        # Check for buggy platforms. Some Python versions on some platforms
-        # (notably OSF1 Alpha and RedHat 7.1) truncate the value for
-        # termios.TIOCSWINSZ. It is not clear why this happens.
-        # These platforms don't seem to handle the signed int very well;
-        # yet other platforms like OpenBSD have a large negative value for
-        # TIOCSWINSZ and they don't have a truncate problem.
-        # Newer versions of Linux have totally different values for TIOCSWINSZ.
-        # Note that this fix is a hack.
-        TIOCSWINSZ = getattr(termios, 'TIOCSWINSZ', -2146929561)
-        if TIOCSWINSZ == 2148037735L: # L is not required in Python >= 2.2.
-            TIOCSWINSZ = -2146929561 # Same bits, but with sign.
-        # Note, assume ws_xpixel and ws_ypixel are zero.
-        s = struct.pack('HHHH', r, c, 0, 0)
-        fcntl.ioctl(self.fileno(), TIOCSWINSZ, s)
-
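-    # Editor's note -- an illustrative sketch (not original code) of the window
-    # size calls documented above.
-    #
-    #     child = pexpect.spawn('/bin/bash')
-    #     child.setwinsize(40, 132)        # rows, cols; sends SIGWINCH to the child
-    #     print child.getwinsize()         # -> (40, 132)
-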
-    def interact(self, escape_character = chr(29), input_filter = None, output_filter = None):
-
-        """This gives control of the child process to the interactive user (the
-        human at the keyboard). Keystrokes are sent to the child process, and
-        the stdout and stderr output of the child process is printed. This
-        simply echoes the child stdout and child stderr to the real stdout and
-        it echoes the real stdin to the child stdin. When the user types the
-        escape_character this method will stop. The default for
-        escape_character is ^]. This should not be confused with ASCII 27 --
-        the ESC character. ASCII 29 was chosen for historical merit because
-        this is the character used by 'telnet' as the escape character. The
-        escape_character will not be sent to the child process.
-
-        You may pass in optional input and output filter functions. These
-        functions should take a string and return a string. The output_filter
-        will be passed all the output from the child process. The input_filter
-        will be passed all the keyboard input from the user. The input_filter
-        is run BEFORE the check for the escape_character.
-
-        Note that if you change the window size of the parent the SIGWINCH
-        signal will not be passed through to the child. If you want the child
-        window size to change when the parent's window size changes then do
-        something like the following example::
-
-            import pexpect, struct, fcntl, termios, signal, sys
-            def sigwinch_passthrough (sig, data):
-                s = struct.pack("HHHH", 0, 0, 0, 0)
-                a = struct.unpack('hhhh', fcntl.ioctl(sys.stdout.fileno(), termios.TIOCGWINSZ , s))
-                global p
-                p.setwinsize(a[0],a[1])
-            p = pexpect.spawn('/bin/bash') # Note this is global and used in sigwinch_passthrough.
-            signal.signal(signal.SIGWINCH, sigwinch_passthrough)
-            p.interact()
-        """
-
-        # Flush the buffer.
-        self.stdout.write (self.buffer)
-        self.stdout.flush()
-        self.buffer = ''
-        mode = tty.tcgetattr(self.STDIN_FILENO)
-        tty.setraw(self.STDIN_FILENO)
-        try:
-            self.__interact_copy(escape_character, input_filter, output_filter)
-        finally:
-            tty.tcsetattr(self.STDIN_FILENO, tty.TCSAFLUSH, mode)
-
-    def __interact_writen(self, fd, data):
-
-        """This is used by the interact() method.
-        """
-
-        while data != '' and self.isalive():
-            n = os.write(fd, data)
-            data = data[n:]
-
-    def __interact_read(self, fd):
-
-        """This is used by the interact() method.
-        """
-
-        return os.read(fd, 1000)
-
-    def __interact_copy(self, escape_character = None, input_filter = None, output_filter = None):
-
-        """This is used by the interact() method.
-        """
-
-        while self.isalive():
-            r,w,e = self.__select([self.child_fd, self.STDIN_FILENO], [], [])
-            if self.child_fd in r:
-                data = self.__interact_read(self.child_fd)
-                if output_filter: data = output_filter(data)
-                if self.logfile is not None:
-                    self.logfile.write (data)
-                    self.logfile.flush()
-                os.write(self.STDOUT_FILENO, data)
-            if self.STDIN_FILENO in r:
-                data = self.__interact_read(self.STDIN_FILENO)
-                if input_filter: data = input_filter(data)
-                i = data.rfind(escape_character)
-                if i != -1:
-                    data = data[:i]
-                    self.__interact_writen(self.child_fd, data)
-                    break
-                self.__interact_writen(self.child_fd, data)
-
-    def __select (self, iwtd, owtd, ewtd, timeout=None):
-
-        """This is a wrapper around select.select() that ignores signals. If
-        select.select raises a select.error exception and errno is an EINTR
-        error then it is ignored. Mainly this is used to ignore sigwinch
-        (terminal resize). """
-
-        # if select() is interrupted by a signal (errno==EINTR) then
-        # we loop back and enter the select() again.
-        if timeout is not None:
-            end_time = time.time() + timeout
-        while True:
-            try:
-                return select.select (iwtd, owtd, ewtd, timeout)
-            except select.error, e:
-                if e[0] == errno.EINTR:
-                    # if we loop back we have to subtract the amount of time we already waited.
-                    if timeout is not None:
-                        timeout = end_time - time.time()
-                        if timeout < 0:
-                            return ([],[],[])
-                else: # something else caused the select.error, so this really is an exception
-                    raise
-
-##############################################################################
-# The following methods are no longer supported or allowed.
-
-    def setmaxread (self, maxread):
-
-        """This method is no longer supported or allowed. I don't like getters
-        and setters without a good reason. """
-
-        raise ExceptionPexpect ('This method is no longer supported or allowed. Just assign a value to the maxread member variable.')
-
-    def setlog (self, fileobject):
-
-        """This method is no longer supported or allowed.
-        """
-
-        raise ExceptionPexpect ('This method is no longer supported or allowed. Just assign a value to the logfile member variable.')
-
-##############################################################################
-# End of spawn class
-##############################################################################
-
-class searcher_string (object):
-
-    """This is a plain string search helper for the spawn.expect_any() method.
-
-    Attributes:
-
-        eof_index     - index of EOF, or -1
-        timeout_index - index of TIMEOUT, or -1
-
-    After a successful match by the search() method the following attributes
-    are available:
-
-        start - index into the buffer, first byte of match
-        end   - index into the buffer, first byte after match
-        match - the matching string itself
-    """
-
-    def __init__(self, strings):
-
-        """This creates an instance of searcher_string. This argument 'strings'
-        may be a list; a sequence of strings; or the EOF or TIMEOUT types. """
-
-        self.eof_index = -1
-        self.timeout_index = -1
-        self._strings = []
-        for n, s in zip(range(len(strings)), strings):
-            if s is EOF:
-                self.eof_index = n
-                continue
-            if s is TIMEOUT:
-                self.timeout_index = n
-                continue
-            self._strings.append((n, s))
-
-    def __str__(self):
-
-        """This returns a human-readable string that represents the state of
-        the object."""
-
-        ss =  [ (ns[0],'    %d: "%s"' % ns) for ns in self._strings ]
-        ss.append((-1,'searcher_string:'))
-        if self.eof_index >= 0:
-            ss.append ((self.eof_index,'    %d: EOF' % self.eof_index))
-        if self.timeout_index >= 0:
-            ss.append ((self.timeout_index,'    %d: TIMEOUT' % self.timeout_index))
-        ss.sort()
-        ss = zip(*ss)[1]
-        return '\n'.join(ss)
-
-    def search(self, buffer, freshlen, searchwindowsize=None):
-
-        """This searches 'buffer' for the first occurence of one of the search
-        strings.  'freshlen' must indicate the number of bytes at the end of
-        'buffer' which have not been searched before. It helps to avoid
-        searching the same, possibly big, buffer over and over again.
-
-        See class spawn for the 'searchwindowsize' argument.
-
-        If there is a match this returns the index of that string, and sets
-        'start', 'end' and 'match'. Otherwise, this returns -1. """
-
-        absurd_match = len(buffer)
-        first_match = absurd_match
-
-        # 'freshlen' helps a lot here. Further optimizations could
-        # possibly include:
-        #
-        # using something like the Boyer-Moore Fast String Searching
-        # Algorithm; pre-compiling the search through a list of
-        # strings into something that can scan the input once to
-        # search for all N strings; realize that if we search for
-        # ['bar', 'baz'] and the input is '...foo' we need not bother
-        # rescanning until we've read three more bytes.
-        #
-        # Sadly, I don't know enough about this interesting topic. /grahn
-        
-        for index, s in self._strings:
-            if searchwindowsize is None:
-                # the match, if any, can only be in the fresh data,
-                # or at the very end of the old data
-                offset = -(freshlen+len(s))
-            else:
-                # better obey searchwindowsize
-                offset = -searchwindowsize
-            n = buffer.find(s, offset)
-            if n >= 0 and n < first_match:
-                first_match = n
-                best_index, best_match = index, s
-        if first_match == absurd_match:
-            return -1
-        self.match = best_match
-        self.start = first_match
-        self.end = self.start + len(self.match)
-        return best_index
-
-class searcher_re (object):
-
-    """This is regular expression string search helper for the
-    spawn.expect_any() method.
-
-    Attributes:
-
-        eof_index     - index of EOF, or -1
-        timeout_index - index of TIMEOUT, or -1
-
-    After a successful match by the search() method the following attributes
-    are available:
-
-        start - index into the buffer, first byte of match
-        end   - index into the buffer, first byte after match
-        match - the re.match object returned by a succesful re.search
-
-    """
-
-    def __init__(self, patterns):
-
-        """This creates an instance that searches for 'patterns' Where
-        'patterns' may be a list or other sequence of compiled regular
-        expressions, or the EOF or TIMEOUT types."""
-
-        self.eof_index = -1
-        self.timeout_index = -1
-        self._searches = []
-        for n, s in zip(range(len(patterns)), patterns):
-            if s is EOF:
-                self.eof_index = n
-                continue
-            if s is TIMEOUT:
-                self.timeout_index = n
-                continue
-            self._searches.append((n, s))
-
-    def __str__(self):
-
-        """This returns a human-readable string that represents the state of
-        the object."""
-
-        ss =  [ (n,'    %d: re.compile("%s")' % (n,str(s.pattern))) for n,s in self._searches]
-        ss.append((-1,'searcher_re:'))
-        if self.eof_index >= 0:
-            ss.append ((self.eof_index,'    %d: EOF' % self.eof_index))
-        if self.timeout_index >= 0:
-            ss.append ((self.timeout_index,'    %d: TIMEOUT' % self.timeout_index))
-        ss.sort()
-        ss = zip(*ss)[1]
-        return '\n'.join(ss)
-
-    def search(self, buffer, freshlen, searchwindowsize=None):
-
-        """This searches 'buffer' for the first occurence of one of the regular
-        expressions. 'freshlen' must indicate the number of bytes at the end of
-        'buffer' which have not been searched before.
-
-        See class spawn for the 'searchwindowsize' argument.
-        
-        If there is a match this returns the index of that string, and sets
-        'start', 'end' and 'match'. Otherwise, returns -1."""
-
-        absurd_match = len(buffer)
-        first_match = absurd_match
-        # 'freshlen' doesn't help here -- we cannot predict the
-        # length of a match, and the re module provides no help.
-        if searchwindowsize is None:
-            searchstart = 0
-        else:
-            searchstart = max(0, len(buffer)-searchwindowsize)
-        for index, s in self._searches:
-            match = s.search(buffer, searchstart)
-            if match is None:
-                continue
-            n = match.start()
-            if n < first_match:
-                first_match = n
-                the_match = match
-                best_index = index
-        if first_match == absurd_match:
-            return -1
-        self.start = first_match
-        self.match = the_match
-        self.end = self.match.end()
-        return best_index
-
-def which (filename):
-
-    """This takes a given filename; tries to find it in the environment path;
-    then checks if it is executable. This returns the full path to the filename
-    if found and executable. Otherwise this returns None."""
-
-    # Special case where filename already contains a path.
-    if os.path.dirname(filename) != '':
-        if os.access (filename, os.X_OK):
-            return filename
-
-    if not os.environ.has_key('PATH') or os.environ['PATH'] == '':
-        p = os.defpath
-    else:
-        p = os.environ['PATH']
-
-    # Oddly enough this was the one line that made Pexpect
-    # incompatible with Python 1.5.2.
-    #pathlist = p.split (os.pathsep)
-    pathlist = string.split (p, os.pathsep)
-
-    for path in pathlist:
-        f = os.path.join(path, filename)
-        if os.access(f, os.X_OK):
-            return f
-    return None
-
-def split_command_line(command_line):
-
-    """This splits a command line into a list of arguments. It splits arguments
-    on spaces, but handles embedded quotes, doublequotes, and escaped
-    characters. It's impossible to do this with a regular expression, so I
-    wrote a little state machine to parse the command line. """
-
-    arg_list = []
-    arg = ''
-
-    # Constants to name the states we can be in.
-    state_basic = 0
-    state_esc = 1
-    state_singlequote = 2
-    state_doublequote = 3
-    state_whitespace = 4 # The state of consuming whitespace between commands.
-    state = state_basic
-
-    for c in command_line:
-        if state == state_basic or state == state_whitespace:
-            if c == '\\': # Escape the next character
-                state = state_esc
-            elif c == r"'": # Handle single quote
-                state = state_singlequote
-            elif c == r'"': # Handle double quote
-                state = state_doublequote
-            elif c.isspace():
-                # Add arg to arg_list if we aren't in the middle of whitespace.
-                if state == state_whitespace:
-                    None # Do nothing.
-                else:
-                    arg_list.append(arg)
-                    arg = ''
-                    state = state_whitespace
-            else:
-                arg = arg + c
-                state = state_basic
-        elif state == state_esc:
-            arg = arg + c
-            state = state_basic
-        elif state == state_singlequote:
-            if c == r"'":
-                state = state_basic
-            else:
-                arg = arg + c
-        elif state == state_doublequote:
-            if c == r'"':
-                state = state_basic
-            else:
-                arg = arg + c
-
-    if arg != '':
-        arg_list.append(arg)
-    return arg_list
-
-# vi:ts=4:sw=4:expandtab:ft=python:


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/visualization/_MPL.c
--- a/yt/visualization/_MPL.c
+++ b/yt/visualization/_MPL.c
@@ -139,6 +139,7 @@
   xiter[0] = yiter[0] = 0;
   xiterv[0] = yiterv[0] = 0.0;
 
+  Py_BEGIN_ALLOW_THREADS
   for(i=0;i<rows;i++)for(j=0;j<cols;j++)
       *(npy_float64*) PyArray_GETPTR2(my_array, i, j) = 0.0;
   for(p=0;p<nx;p++)
@@ -187,6 +188,7 @@
       }
     }
   }
+  Py_END_ALLOW_THREADS
 
   // Attatch output buffer to output buffer
 


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/visualization/api.py
--- a/yt/visualization/api.py
+++ b/yt/visualization/api.py
@@ -49,7 +49,8 @@
     splat_points, \
     annotate_image, \
     apply_colormap, \
-    scale_image
+    scale_image, \
+    write_projection
 
 from plot_modifications import \
     PlotCallback, \


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -327,3 +327,87 @@
     im = image.copy()
     au.add_points_to_image(im, points_x, points_y, val)
     return im
+
+def write_projection(data, filename, colorbar=True, colorbar_label=None, 
+                    title=None, limits=None, take_log=True, var_fig_size=False):
+    r"""Write a projection or volume rendering to disk with a variety of 
+    pretty parameters such as limits, title, colorbar, etc.  write_projection
+    uses the standard matplotlib interface to create the figure.  N.B. This code
+    only works *after* you have created the projection using the standard 
+    framework (i.e. the Camera interface or off_axis_projection).
+
+    Accepts an NxM sized array representing the projection itself as well
+    as the filename to which you will save this figure.  
+
+    Parameters
+    ----------
+    data : array_like 
+        image array as output by off_axis_projection or camera.snapshot()
+    filename : string 
+        the filename where the data will be saved
+    colorbar : boolean
+        do you want a colorbar generated to the right of the image?
+    colorbar_label : string
+        the label associated with your colorbar
+    title : string
+        the label at the top of the figure
+    limits : 2-element array_like
+        the lower limit and the upper limit to be plotted in the figure 
+        of the data array
+    take_log : boolean
+        plot the log of the data array (and take the log of the limits if set)?
+    var_fig_size : boolean
+        If we want the resolution (and size) of the output image to scale 
+        with the resolution of the image array.  
+
+    Examples
+    --------
+
+    >>> image = off_axis_projection(pf, c, L, W, N, "Density", no_ghost=False)
+    >>> write_projection(image, 'test.png', 
+                         colorbar_label="Column Density (cm$^{-2}$)", 
+                         title="Offaxis Projection", limits=(1e-3,1e-5), 
+                         take_log=True)
+    """
+    import pylab as pl
+
+    # If this is rendered as log, then apply now.
+    if take_log:
+        data = na.log10(data)
+        if limits is not None:
+            limits = na.log10(limits)
+
+
+    # Create the figure and paint the data on
+    fig = pl.figure()
+    ax = fig.add_subplot(111)
+
+    if limits is not None:
+        cax = ax.imshow(data, vmin=limits[0], vmax=limits[1])
+    else:
+        cax = ax.imshow(data)
+
+    if title:
+        ax.set_title(title)
+
+    # Suppress the x and y pixel counts
+    ax.set_xticks(())
+    ax.set_yticks(())
+
+    # Add a color bar and label if requested
+    if colorbar:
+        cbar = fig.colorbar(cax)
+        if colorbar_label:
+            cbar.ax.set_ylabel(colorbar_label)
+
+    # If we want the resolution of the image to scale with the resolution
+    # of the image array. we increase the dpi value accordingly
+    if var_fig_size:
+        N = data.shape[0]
+        mag_factor = N/480.
+        pl.savefig(filename, dpi=100*mag_factor)
+    else:
+        pl.savefig(filename)
+
+    pl.clf()
+    pl.close()
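
A minimal usage sketch for the new write_projection, assuming a dataset
loadable with yt.mods.load; the dataset path, field, width, and labels below
are placeholders, and limits should be given as (lower, upper):

    from yt.mods import load
    from yt.visualization.volume_rendering.camera import off_axis_projection
    from yt.visualization.image_writer import write_projection

    pf = load("DD0010/DD0010")             # hypothetical dataset path
    c = [0.5, 0.5, 0.5]                    # projection center
    L = [1.0, 0.2, 0.3]                    # off-axis line of sight
    # NxN image of Density integrated along L, width 0.25 in code units
    image = off_axis_projection(pf, c, L, 0.25, 512, "Density")
    write_projection(image, "offaxis_density.png",
                     colorbar_label="Column Density (g cm$^{-2}$)",
                     title="Off-axis Projection",
                     limits=(1e-5, 1e-3),  # lower, upper; logged when take_log=True
                     take_log=True, var_fig_size=True)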


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/visualization/plot_collection.py
--- a/yt/visualization/plot_collection.py
+++ b/yt/visualization/plot_collection.py
@@ -211,9 +211,9 @@
         Only ONE of the following options can be specified. If all 3 are
         specified, they will be used in the following precedence order:
 
-        * `ticks` - a list of floating point numbers at which to put ticks
-        * `minmaxtick` - display DEFAULT ticks with min & max also displayed
-        * `nticks` - if ticks not specified, can automatically determine a
+        * ``ticks`` - a list of floating point numbers at which to put ticks
+        * ``minmaxtick`` - display DEFAULT ticks with min & max also displayed
+        * ``nticks`` - if ticks not specified, can automatically determine a
           number of ticks to be evenly spaced in log space
         """
         for plot in self.plots:
@@ -1713,9 +1713,9 @@
     r"""Construct a multiple axes plot object, with or without a colorbar, into
     which multiple plots may be inserted.
 
-    This will create a set of `matplotlib.axes.Axes`, all lined up into a grid,
-    which are then returned to the user and which can be used to plot multiple
-    plots on a single figure.
+    This will create a set of :class:`matplotlib.axes.Axes`, all lined up into
+    a grid, which are then returned to the user and which can be used to plot
+    multiple plots on a single figure.
 
     Parameters
     ----------
@@ -1733,12 +1733,12 @@
 
     Returns
     -------
-    fig : `matplotlib.figure.Figure
+    fig : :class:`matplotlib.figure.Figure`
         The figure created inside which the axes reside
-    tr : list of list of `matplotlib.axes.Axes` objects
+    tr : list of list of :class:`matplotlib.axes.Axes` objects
         This is a list, where the inner list is along the x-axis and the outer
         is along the y-axis
-    cbars : list of `matplotlib.axes.Axes` objects
+    cbars : list of :class:`matplotlib.axes.Axes` objects
         Each of these is an axes onto which a colorbar can be placed.
 
     Notes


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/visualization/plot_types.py
--- a/yt/visualization/plot_types.py
+++ b/yt/visualization/plot_types.py
@@ -170,10 +170,11 @@
 
         Only ONE of the following options can be specified. If all 3 are
         specified, they will be used in the following precedence order:
-            ticks - a list of floating point numbers at which to put ticks
-            minmaxtick - display DEFAULT ticks with min & max also displayed
-            nticks - if ticks not specified, can automatically determine a
-               number of ticks to be evenly spaced in log space
+
+        * ``ticks`` - a list of floating point numbers at which to put ticks
+        * ``minmaxtick`` - display DEFAULT ticks with min & max also displayed
+        * ``nticks`` - if ticks not specified, can automatically determine a
+          number of ticks to be evenly spaced in log space
         """
         # This next call fixes some things, but is slower...
         self._redraw_image()


diff -r 6251ac233b88bee5ddff33ecefb4da3b179eacce -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -804,6 +804,59 @@
                              oc.sub_samples, oc.pf)
         return (left_camera, right_camera)
 
+class FisheyeCamera(Camera):
+    def __init__(self, center, radius, fov, resolution,
+                 transfer_function = None, fields = None,
+                 sub_samples = 5, log_fields = None, volume = None,
+                 pf = None, no_ghost=False):
+        ParallelAnalysisInterface.__init__(self)
+        if pf is not None: self.pf = pf
+        self.center = na.array(center, dtype='float64')
+        self.radius = radius
+        self.fov = fov
+        if iterable(resolution):
+            raise RuntimeError("Resolution must be a single int")
+        self.resolution = resolution
+        if transfer_function is None:
+            transfer_function = ProjectionTransferFunction()
+        self.transfer_function = transfer_function
+        if fields is None: fields = ["Density"]
+        self.fields = fields
+        self.sub_samples = sub_samples
+        self.log_fields = log_fields
+        if volume is None:
+            volume = AMRKDTree(self.pf, fields=self.fields, no_ghost=no_ghost,
+                               log_fields=log_fields)
+        self.volume = volume
+
+    def snapshot(self):
+        image = na.zeros((self.resolution**2,1,3), dtype='float64', order='C')
+        # We now follow figures 4-7 of:
+        # http://paulbourke.net/miscellaneous/domefisheye/fisheye/
+        # ...but all in Cython.
+        vp = arr_fisheye_vectors(self.resolution, self.fov)
+        vp.shape = (self.resolution**2,1,3)
+        uv = na.ones(3, dtype='float64')
+        positions = na.ones((self.resolution**2, 1, 3), dtype='float64') * self.center
+        vector_plane = VectorPlane(positions, vp, self.center,
+                        (0.0, 1.0, 0.0, 1.0), image, uv, uv)
+        tfp = TransferFunctionProxy(self.transfer_function)
+        tfp.ns = self.sub_samples
+        self.volume.initialize_source()
+        mylog.info("Rendering fisheye of %s^2", self.resolution)
+        pbar = get_pbar("Ray casting",
+                        (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
+
+        total_cells = 0
+        for brick in self.volume.traverse(None, self.center, image):
+            brick.cast_plane(tfp, vector_plane)
+            total_cells += na.prod(brick.my_data[0].shape)
+            pbar.update(total_cells)
+        pbar.finish()
+        image.shape = (self.resolution, self.resolution, 3)
+        return image
+
+
 def off_axis_projection(pf, center, normal_vector, width, resolution,
                         field, weight = None):
     r"""Project through a parameter file, off-axis, and return the image plane.



https://bitbucket.org/yt_analysis/yt/changeset/178c1796fc82/
changeset:   178c1796fc82
branch:      yt
user:        MatthewTurk
date:        2012-01-10 16:59:49
summary:     It appears that the image ordering for orthogonal projections is actually
Fortran-ordered.
affected #:  2 files

diff -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 -r 178c1796fc822d466796f58eb1c78a072c91341c yt/utilities/_amr_utils/grid_traversal.pyx
--- a/yt/utilities/_amr_utils/grid_traversal.pyx
+++ b/yt/utilities/_amr_utils/grid_traversal.pyx
@@ -260,7 +260,6 @@
         cdef np.float64_t width[3] 
         for i in range(3):
             width[i] = self.width[i]
-        #print iter[0], iter[1], iter[2], iter[3], width[0], width[1], width[2]
         with nogil, parallel():
             idata = <ImageAccumulator *> malloc(sizeof(ImageAccumulator))
             idata.supp_data = self.supp_data
@@ -296,7 +295,6 @@
             free(v_dir)
             free(idata)
             free(v_pos)
-        #print self.aimage.max()
         return hit
 
 cdef void projection_sampler(


diff -r d0037d21f2e0b9ac23b60714f5bbdb734d85b0d7 -r 178c1796fc822d466796f58eb1c78a072c91341c yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -915,7 +915,7 @@
             function=_make_wf(field, weight))
         fields = ["temp_weightfield", weight]
     image = na.zeros((resolution, resolution, 3), dtype='float64',
-                      order='C')
+                      order='F')
     normal_vector, north_vector, east_vector = ortho_find(normal_vector)
     unit_vectors = [north_vector, east_vector, normal_vector]
     back_center= center - 0.5*width * normal_vector
@@ -940,7 +940,6 @@
                 na.maximum(ma, this_point, ma)
     # Now we have a bounding box.
     grids = pf.h.region(center, mi, ma)._grids
-    print len(grids), len(pf.h.grids)
     pb = get_pbar("Sampling ", len(grids))
     for i,grid in enumerate(grids):
         data = [(grid[field] * grid.child_mask).astype("float64")



https://bitbucket.org/yt_analysis/yt/changeset/e9b18b3226f7/
changeset:   e9b18b3226f7
branch:      yt
user:        MatthewTurk
date:        2012-01-12 16:40:16
summary:     Adding an inner_radius option to the HEALPix projection
affected #:  1 file

diff -r 178c1796fc822d466796f58eb1c78a072c91341c -r e9b18b3226f793ea2efa3e4359dca5cface422f2 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -961,7 +961,7 @@
     return image[:,:,0]
 
 def allsky_projection(pf, center, radius, nside, field, weight = None,
-                      rotation = None):
+                      inner_radius = 0.05, rotation = None):
     r"""Project through a parameter file, through an allsky-method
     decomposition from HEALpix, and return the image plane.
 
@@ -988,6 +988,15 @@
         If supplied, the field will be pre-multiplied by this, then divided by
         the integrated value of this field.  This returns an average rather
         than a sum.
+    inner_radius : optional, float, defaults to 0.05
+        The radius of the inner clipping plane.  To avoid unphysical local
+        effects (i.e., octopole moments in the resultant image) the starting
+        position for each vector will be expanded outward by this, times the
+        radius, times the individual directional vectors.
+    rotation : optional, 3x3 array
+        If supplied, the vectors will be rotated by this.  You can construct
+        this by, for instance, calling na.array([v1,v2,v3]) where those are the
+        three reference planes of an orthogonal frame (see ortho_find).
 
     Returns
     -------
@@ -1019,7 +1028,6 @@
     nv = 12*nside**2
     image = na.zeros((nv,1,3), dtype='float64', order='C')
     vs = arr_pix2vec_nest(nside, na.arange(nv))
-    vs += na.random.random(vs.shape)*1e-10 - 0.5e-10
     vs *= radius
     vs.shape = (nv,1,3)
     if rotation is not None:
@@ -1027,6 +1035,7 @@
         for i in range(3):
             vs[:,:,i] = (vs2 * rotation[:,i]).sum(axis=2)
     positions = na.ones((nv, 1, 3), dtype='float64', order='C') * center
+    positions += inner_radius * vs
     uv = na.ones(3, dtype='float64')
     grids = pf.h.sphere(center, radius)._grids
     sampler = ProjectionSampler(positions, vs, center, (0.0, 0.0, 0.0, 0.0),



https://bitbucket.org/yt_analysis/yt/changeset/8250338aa6d6/
changeset:   8250338aa6d6
branch:      yt
user:        MatthewTurk
date:        2012-01-12 17:58:35
summary:     Change inner_radius in the HEALPix rendering to be specified in units of the
local dx. Remove extraneous return values.
affected #:  1 file

diff -r e9b18b3226f793ea2efa3e4359dca5cface422f2 -r 8250338aa6d6f55f90b99c19f0be74b9337a456c yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -961,7 +961,7 @@
     return image[:,:,0]
 
 def allsky_projection(pf, center, radius, nside, field, weight = None,
-                      inner_radius = 0.05, rotation = None):
+                      inner_radius = 10, rotation = None):
     r"""Project through a parameter file, through an allsky-method
     decomposition from HEALpix, and return the image plane.
 
@@ -989,10 +989,9 @@
         the integrated value of this field.  This returns an average rather
         than a sum.
     inner_radius : optional, float, defaults to 0.05
-        The radius of the inner clipping plane.  To avoid unphysical local
-        effects (i.e., octopole moments in the resultant image) the starting
-        position for each vector will be expanded outward by this, times the
-        radius, times the individual directional vectors.
+        The radius of the inner clipping plane, in units of the dx at the point
+        at which the volume rendering is centered.  This avoids unphysical
+        effects of nearby cells.
     rotation : optional, 3x3 array
         If supplied, the vectors will be rotated by this.  You can construct
         this by, for instance, calling na.array([v1,v2,v3]) where those are the
@@ -1028,14 +1027,17 @@
     nv = 12*nside**2
     image = na.zeros((nv,1,3), dtype='float64', order='C')
     vs = arr_pix2vec_nest(nside, na.arange(nv))
-    vs *= radius
     vs.shape = (nv,1,3)
     if rotation is not None:
         vs2 = vs.copy()
         for i in range(3):
             vs[:,:,i] = (vs2 * rotation[:,i]).sum(axis=2)
     positions = na.ones((nv, 1, 3), dtype='float64', order='C') * center
-    positions += inner_radius * vs
+    dx = min(g.dds.min() for g in pf.h.find_point(center)[0])
+    print inner_radius * dx
+    insert_ipython()
+    positions += inner_radius * dx * vs
+    vs *= radius
     uv = na.ones(3, dtype='float64')
     grids = pf.h.sphere(center, radius)._grids
     sampler = ProjectionSampler(positions, vs, center, (0.0, 0.0, 0.0, 0.0),
@@ -1059,7 +1061,7 @@
     else:
         image[:,:,0] /= image[:,:,1]
         pf.field_info.pop("temp_weightfield")
-    return image[:,0,0], (vs, positions, image)
+    return image[:,0,0]
 
 def plot_allsky_healpix(image, nside, fn, label = "", rotation = None,
                         take_log = True, resolution=512):



https://bitbucket.org/yt_analysis/yt/changeset/029e0d6bd097/
changeset:   029e0d6bd097
branch:      yt
user:        MatthewTurk
date:        2012-01-12 18:01:53
summary:     Oops, extraneous debugging.
affected #:  1 file

diff -r 8250338aa6d6f55f90b99c19f0be74b9337a456c -r 029e0d6bd0978a3c4cbcefb28cf4cee743bc8f59 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -1034,8 +1034,6 @@
             vs[:,:,i] = (vs2 * rotation[:,i]).sum(axis=2)
     positions = na.ones((nv, 1, 3), dtype='float64', order='C') * center
     dx = min(g.dds.min() for g in pf.h.find_point(center)[0])
-    print inner_radius * dx
-    insert_ipython()
     positions += inner_radius * dx * vs
     vs *= radius
     uv = na.ones(3, dtype='float64')
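
With these three changesets in place, a minimal sketch of an all-sky
projection might look like the following; the dataset path, center, radius,
and nside are placeholders, and inner_radius is now counted in local cell
widths (dx) at the center:

    from yt.mods import load
    from yt.visualization.volume_rendering.camera import \
        allsky_projection, plot_allsky_healpix

    pf = load("DD0010/DD0010")                  # hypothetical dataset path
    center = [0.5, 0.5, 0.5]
    nside = 32                                  # HEALPix resolution parameter
    image = allsky_projection(pf, center, 0.5, nside, "Density",
                              inner_radius=10)  # start rays 10 local dx out
    plot_allsky_healpix(image, nside, "allsky_density.png",
                        label="Column Density")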



https://bitbucket.org/yt_analysis/yt/changeset/88621ddd4ac8/
changeset:   88621ddd4ac8
branch:      yt
user:        samskillman
date:        2012-01-11 23:13:07
summary:     First pass at adding a light source to the rendering, done within the
new rendering refactor. It is currently enabled by passing use_light=True
(default False) when creating the camera object; the light direction and
color are controlled by cam.light_dir (3 floats) and cam.light_rgba (4 floats).
affected #:  6 files
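
A hedged usage sketch of the new option; the dataset path, transfer-function
bounds, and camera parameters below are placeholders, and pf.h.camera is the
usual construction helper for the Camera object:

    from yt.mods import load
    from yt.visualization.volume_rendering.transfer_functions import \
        ColorTransferFunction
    from yt.visualization.image_writer import write_bitmap

    pf = load("DD0010/DD0010")                  # hypothetical dataset path
    tf = ColorTransferFunction((-30, -22))      # bounds in log10(Density)
    tf.add_layers(4, w=0.01)
    cam = pf.h.camera([0.5, 0.5, 0.5],          # center
                      [1.0, 1.0, 1.0],          # normal vector (line of sight)
                      0.2, 256, tf,             # width, resolution, transfer fn
                      use_light=True)           # new keyword from this changeset
    cam.light_dir = [0.0, 0.0, 1.0]             # 3 floats: light direction
    cam.light_rgba = [1.0, 1.0, 1.0, 0.05]      # 4 floats: light color + strength
    image = cam.snapshot()
    write_bitmap(image, "lit_render.png")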

diff -r 2477a2f831147992fbe47aaab8e82ef3c9b8c0bd -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 yt/utilities/_amr_utils/FixedInterpolator.c
--- a/yt/utilities/_amr_utils/FixedInterpolator.c
+++ b/yt/utilities/_amr_utils/FixedInterpolator.c
@@ -128,7 +128,7 @@
 }
 
 void eval_gradient(int ds[3], npy_float64 dp[3],
-				  npy_float64 *data, npy_float64 grad[3])
+				  npy_float64 *data, npy_float64 *grad)
 {
     // We just take some small value
 


diff -r 2477a2f831147992fbe47aaab8e82ef3c9b8c0bd -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 yt/utilities/_amr_utils/FixedInterpolator.h
--- a/yt/utilities/_amr_utils/FixedInterpolator.h
+++ b/yt/utilities/_amr_utils/FixedInterpolator.h
@@ -41,7 +41,7 @@
 npy_float64 trilinear_interpolate(int ds[3], int ci[3], npy_float64 dp[3],
 				  npy_float64 *data);
 
-void eval_gradient(int ds[3], npy_float64 dp[3], npy_float64 *data, npy_float64 grad[3]);
+void eval_gradient(int ds[3], npy_float64 dp[3], npy_float64 *data, npy_float64 *grad);
 
 void vertex_interp(npy_float64 v1, npy_float64 v2, npy_float64 isovalue,
                    npy_float64 vl[3], npy_float64 dds[3],


diff -r 2477a2f831147992fbe47aaab8e82ef3c9b8c0bd -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 yt/utilities/_amr_utils/field_interpolation_tables.pxd
--- a/yt/utilities/_amr_utils/field_interpolation_tables.pxd
+++ b/yt/utilities/_amr_utils/field_interpolation_tables.pxd
@@ -25,7 +25,7 @@
 
 cimport cython
 cimport numpy as np
-from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip
+from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip, fabs
 
 cdef struct FieldInterpolationTable:
     # Note that we make an assumption about retaining a reference to values
@@ -91,3 +91,30 @@
     for i in range(3):
         ta = fmax((1.0 - dt*trgba[i+3]), 0.0)
         rgba[i] = dt*trgba[i] + ta * rgba[i]
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
+cdef inline void FIT_eval_transfer_with_light(np.float64_t dt, np.float64_t *dvs, 
+        np.float64_t *grad, np.float64_t *l_dir, np.float64_t *l_rgba,
+        np.float64_t *rgba, int n_fits,
+        FieldInterpolationTable fits[6],
+        int field_table_ids[6]) nogil:
+    cdef int i, fid, use
+    cdef np.float64_t ta, tf, istorage[6], trgba[6], dot_prod
+    dot_prod = 0.0
+    for i in range(3):
+        dot_prod += l_dir[i]*grad[i]
+    #dot_prod = fmax(0.0, dot_prod)
+    for i in range(6): istorage[i] = 0.0
+    for i in range(n_fits):
+        istorage[i] = FIT_get_value(&fits[i], dvs)
+    for i in range(n_fits):
+        fid = fits[i].weight_table_id
+        if fid != -1: istorage[i] *= istorage[fid]
+    for i in range(6):
+        trgba[i] = istorage[field_table_ids[i]]
+    for i in range(3):
+        ta = fmax((1.0 - dt*trgba[i+3]), 0.0)
+        rgba[i] = dt*trgba[i] + ta * rgba[i] + dt*dot_prod*l_rgba[i]*trgba[i]*l_rgba[3] #(trgba[0]+trgba[1]+trgba[2])
+
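
In words, the per-channel update in FIT_eval_transfer_with_light above adds a
diffuse light term to the usual emission/absorption blend; with
g = l_dir . grad it is roughly

    rgba[i] <- dt*trgba[i]
               + max(1 - dt*trgba[i+3], 0) * rgba[i]
               + dt * g * l_rgba[i] * l_rgba[3] * trgba[i]

so the light contribution scales with the local emissivity, the light color,
its overall strength (l_rgba[3]), and how well the field gradient lines up
with the light direction.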


diff -r 2477a2f831147992fbe47aaab8e82ef3c9b8c0bd -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 yt/utilities/_amr_utils/fp_utils.pxd
--- a/yt/utilities/_amr_utils/fp_utils.pxd
+++ b/yt/utilities/_amr_utils/fp_utils.pxd
@@ -42,6 +42,10 @@
     if f0 < f1: return f0
     return f1
 
+cdef inline np.float64_t fabs(np.float64_t f0) nogil:
+    if f0 < 0.0: return -f0
+    return f0
+
 cdef inline int iclip(int i, int a, int b) nogil:
     if i < a: return a
     if i > b: return b


diff -r 2477a2f831147992fbe47aaab8e82ef3c9b8c0bd -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 yt/utilities/_amr_utils/grid_traversal.pyx
--- a/yt/utilities/_amr_utils/grid_traversal.pyx
+++ b/yt/utilities/_amr_utils/grid_traversal.pyx
@@ -31,7 +31,8 @@
 from stdlib cimport malloc, free, abs
 from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip
 from field_interpolation_tables cimport \
-    FieldInterpolationTable, FIT_initialize_table, FIT_eval_transfer
+    FieldInterpolationTable, FIT_initialize_table, FIT_eval_transfer,\
+    FIT_eval_transfer_with_light
 from fixed_interpolator cimport *
 
 from cython.parallel import prange, parallel, threadid
@@ -307,7 +308,7 @@
     cdef ImageAccumulator *im = <ImageAccumulator *> data
     cdef int i
     cdef np.float64_t dl = (exit_t - enter_t)
-    cdef int di = (index[0]*vc.dims[1]+index[1])*vc.dims[2]+index[2]
+    cdef int di = (index[0]*vc.dims[1]+index[1])*vc.dims[2]+index[2] 
     for i in range(imin(3, vc.n_fields)):
         im.rgba[i] += vc.data[i][di] * dl
 
@@ -324,6 +325,8 @@
     np.float64_t star_er
     np.float64_t star_sigma_num
     kdtree_utils.kdtree *star_list
+    np.float64_t *light_dir
+    np.float64_t *light_rgba
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
@@ -368,6 +371,46 @@
         for i in range(vc.n_fields):
             dvs[i] += slopes[i]
 
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
+cdef void volume_render_gradient_sampler(
+                 VolumeContainer *vc, 
+                 np.float64_t v_pos[3],
+                 np.float64_t v_dir[3],
+                 np.float64_t enter_t,
+                 np.float64_t exit_t,
+                 int index[3],
+                 void *data) nogil:
+    cdef ImageAccumulator *im = <ImageAccumulator *> data
+    cdef VolumeRenderAccumulator *vri = <VolumeRenderAccumulator *> \
+            im.supp_data
+    # we assume this has vertex-centered data.
+    cdef int offset = index[0] * (vc.dims[1] + 1) * (vc.dims[2] + 1) \
+                    + index[1] * (vc.dims[2] + 1) + index[2]
+    cdef np.float64_t slopes[6], dp[3], ds[3]
+    cdef np.float64_t dt = (exit_t - enter_t) / vri.n_samples
+    cdef np.float64_t dvs[6]
+    cdef np.float64_t *grad
+    grad = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+    for i in range(3):
+        dp[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
+        dp[i] -= index[i] * vc.dds[i] + vc.left_edge[i]
+        dp[i] *= vc.idds[i]
+        ds[i] = v_dir[i] * vc.idds[i] * dt
+    for i in range(vri.n_samples):
+        for j in range(vc.n_fields):
+            dvs[j] = offset_interpolate(vc.dims, dp,
+                    vc.data[j] + offset)
+        eval_gradient(vc.dims, dp, vc.data[0] + offset, grad)
+        FIT_eval_transfer_with_light(dt, dvs, grad, 
+                vri.light_dir, vri.light_rgba,
+                im.rgba, vri.n_fits, 
+                vri.fits, vri.field_table_ids)
+        for j in range(3):
+            dp[j] += ds[j]
+    free(grad)
+
 cdef class star_kdtree_container:
     cdef kdtree_utils.kdtree *tree
     cdef public np.float64_t sigma
@@ -524,6 +567,68 @@
         free(self.vra.fits)
         free(self.vra)
 
+cdef class LightSourceRenderSampler(ImageSampler):
+    cdef VolumeRenderAccumulator *vra
+    cdef public object tf_obj
+    cdef public object my_field_tables
+    def __cinit__(self, 
+                  np.ndarray vp_pos,
+                  np.ndarray vp_dir,
+                  np.ndarray[np.float64_t, ndim=1] center,
+                  bounds,
+                  np.ndarray[np.float64_t, ndim=3] image,
+                  np.ndarray[np.float64_t, ndim=1] x_vec,
+                  np.ndarray[np.float64_t, ndim=1] y_vec,
+                  np.ndarray[np.float64_t, ndim=1] width,
+                  tf_obj, n_samples = 10,
+                  light_dir=[1.,1.,1.],
+                  light_rgba=[1.,1.,1.,1.]):
+        ImageSampler.__init__(self, vp_pos, vp_dir, center, bounds, image,
+                               x_vec, y_vec, width)
+        cdef int i
+        cdef np.ndarray[np.float64_t, ndim=1] temp
+        # Now we handle tf_obj
+        self.vra = <VolumeRenderAccumulator *> \
+            malloc(sizeof(VolumeRenderAccumulator))
+        self.vra.fits = <FieldInterpolationTable *> \
+            malloc(sizeof(FieldInterpolationTable) * 6)
+        self.vra.n_fits = tf_obj.n_field_tables
+        assert(self.vra.n_fits <= 6)
+        self.vra.n_samples = n_samples
+        self.vra.light_dir = <np.float64_t *> malloc(sizeof(np.float64_t) * 3)
+        self.vra.light_rgba = <np.float64_t *> malloc(sizeof(np.float64_t) * 4)
+        light_dir /= np.sqrt(light_dir[0]**2 + light_dir[1]**2 + light_dir[2]**2)
+        for i in range(3):
+            self.vra.light_dir[i] = light_dir[i]
+        for i in range(4):
+            self.vra.light_rgba[i] = light_rgba[i]
+        self.my_field_tables = []
+        for i in range(self.vra.n_fits):
+            temp = tf_obj.tables[i].y
+            FIT_initialize_table(&self.vra.fits[i],
+                      temp.shape[0],
+                      <np.float64_t *> temp.data,
+                      tf_obj.tables[i].x_bounds[0],
+                      tf_obj.tables[i].x_bounds[1],
+                      tf_obj.field_ids[i], tf_obj.weight_field_ids[i],
+                      tf_obj.weight_table_ids[i])
+            self.my_field_tables.append((tf_obj.tables[i],
+                                         tf_obj.tables[i].y))
+        for i in range(6):
+            self.vra.field_table_ids[i] = tf_obj.field_table_ids[i]
+        self.supp_data = <void *> self.vra
+
+    def setup(self, PartitionedGrid pg):
+        self.sampler = volume_render_gradient_sampler
+
+    def __dealloc__(self):
+        return
+        free(self.vra.fits)
+        free(self.vra)
+        free(self.light_dir)
+        free(self.light_rgba)
+
+
 cdef class GridFace:
     cdef int direction
     cdef public np.float64_t coord
@@ -684,16 +789,16 @@
            0.0 <= tl and tl < intersect_t:
             direction = i
             intersect_t = tl
-    if enter_t >= 0.0: intersect_t = enter_t
+    if enter_t >= 0.0: intersect_t = enter_t 
     if not ((0.0 <= intersect_t) and (intersect_t < 1.0)): return 0
     for i in range(3):
         # Two things have to be set inside this loop.
         # cur_ind[i], the current index of the grid cell the ray is in
         # tmax[i], the 't' until it crosses out of the grid cell
-        tdelta[i] = step[i] * iv_dir[i] * vc.dds[i]
+        tdelta[i] = step[i] * iv_dir[i] * vc.dds[i] 
         if i == direction and step[i] > 0:
             # Intersection with the left face in this direction
-            cur_ind[i] = 0
+            cur_ind[i] = 0 
         elif i == direction and step[i] < 0:
             # Intersection with the right face in this direction
             cur_ind[i] = vc.dims[i] - 1


diff -r 2477a2f831147992fbe47aaab8e82ef3c9b8c0bd -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -43,6 +43,7 @@
 
 from yt.utilities.amr_utils import \
     PartitionedGrid, ProjectionSampler, VolumeRenderSampler, \
+    LightSourceRenderSampler, \
     arr_vec2pix_nest, arr_pix2vec_nest, arr_ang2pix_nest, \
     pixelize_healpix
 
@@ -55,7 +56,7 @@
                  sub_samples = 5, pf = None,
                  use_kd=True, l_max=None, no_ghost=True,
                  tree_type='domain',expand_factor=1.0,
-                 le=None, re=None):
+                 le=None, re=None, use_light=False):
         r"""A viewpoint into a volume, for volume rendering.
 
         The camera represents the eye of an observer, which will be used to
@@ -215,6 +216,9 @@
         self.use_kd = use_kd
         self.l_max = l_max
         self.no_ghost = no_ghost
+        self.use_light = use_light
+        self.light_dir = None
+        self.light_rgba = None
         if self.no_ghost:
             mylog.info('Warning: no_ghost is currently True (default). This may lead to artifacts at grid boundaries.')
         self.tree_type = tree_type
@@ -358,7 +362,19 @@
                 image, self.unit_vectors[0], self.unit_vectors[1],
                 na.array(self.width),
                 self.transfer_function, self.sub_samples)
-        sampler = VolumeRenderSampler(*args)
+        if self.use_light:
+            if self.light_dir is None:
+                self.set_default_light_dir()
+            temp_dir = na.empty(3,dtype='float64')
+            temp_dir = self.light_dir[0] * self.unit_vectors[1] + \
+                    self.light_dir[1] * self.unit_vectors[2] + \
+                    self.light_dir[2] * self.unit_vectors[0]
+            if self.light_rgba is None:
+                self.set_default_light_rgba()
+            sampler = LightSourceRenderSampler(*args, light_dir=temp_dir,
+                    light_rgba=self.light_rgba)
+        else:
+            sampler = VolumeRenderSampler(*args)
         self.volume.initialize_source()
 
         pbar = get_pbar("Ray casting",
@@ -379,6 +395,12 @@
 
         return image
 
+    def set_default_light_dir(self):
+        self.light_dir = [1.,1.,1.]
+
+    def set_default_light_rgba(self):
+        self.light_rgba = [1.,1.,1.,1.]
+
     def zoom(self, factor):
         r"""Change the distance to the focal point.
 
@@ -464,8 +486,8 @@
                     self.center += (na.array(final) - self.center) / (10. * n_steps)
                 final_zoom = final_width/na.array(self.width)
                 dW = final_zoom**(1.0/n_steps)
-	    else:
-		dW = 1.0
+            else:
+                dW = 1.0
             position_diff = (na.array(final)/self.center)*1.0
             dx = position_diff**(1.0/n_steps)
         else:
@@ -474,8 +496,8 @@
                     width = na.array([final_width, final_width, final_width]) 
                     # front/back, left/right, top/bottom
                 dW = (1.0*final_width-na.array(self.width))/n_steps
-	    else:
-		dW = 1.0
+            else:
+                dW = 1.0
             dx = (na.array(final)-self.center)*1.0/n_steps
         for i in xrange(n_steps):
             if exponential:



https://bitbucket.org/yt_analysis/yt/changeset/b7610f1b5b45/
changeset:   b7610f1b5b45
branch:      yt
user:        samskillman
date:        2012-01-11 23:14:00
summary:     Merging
affected #:  70 files

diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e setup.py
--- a/setup.py
+++ b/setup.py
@@ -76,7 +76,7 @@
 
 import setuptools
 
-VERSION = "2.3dev"
+VERSION = "2.4dev"
 
 if os.path.exists('MANIFEST'): os.remove('MANIFEST')
 


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -32,6 +32,7 @@
 import numpy as na
 import random
 import sys
+import os.path as path
 from collections import defaultdict
 
 from yt.funcs import *
@@ -1360,15 +1361,16 @@
         # The halos are listed in order in the file.
         lines = file("%s.txt" % self.basename)
         locations = []
+        realpath = path.realpath("%s.txt" % self.basename)
         for line in lines:
             line = line.split()
             # Prepend the hdf5 file names with the full path.
             temp = []
             for item in line[1:]:
-                if item[0] == "/":
-                    temp.append(item)
-                else:
-                    temp.append(self.pf.fullpath + '/' + item)
+                # This assumes that the .txt is in the same place as
+                # the h5 files, which is a good one I think.
+                item = item.split("/")
+                temp.append(path.join(path.dirname(realpath), item[-1]))
             locations.append(temp)
         lines.close()
         return locations


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -86,6 +86,9 @@
 "ChildHaloID3", "ChildHaloFrac3",
 "ChildHaloID4", "ChildHaloFrac4"]
 
+NumNeighbors = 15
+NumDB = 5
+
 class DatabaseFunctions(object):
     # Common database functions so it doesn't have to be repeated.
     def _open_database(self):
@@ -366,9 +369,9 @@
         child_points = na.array(child_points)
         fKD.pos = na.asfortranarray(child_points.T)
         fKD.qv = na.empty(3, dtype='float64')
-        fKD.dist = na.empty(5, dtype='float64')
-        fKD.tags = na.empty(5, dtype='int64')
-        fKD.nn = 5
+        fKD.dist = na.empty(NumNeighbors, dtype='float64')
+        fKD.tags = na.empty(NumNeighbors, dtype='int64')
+        fKD.nn = NumNeighbors
         fKD.sort = True
         fKD.rearrange = True
         create_tree(0)
@@ -395,7 +398,7 @@
                 nIDs.append(n)
             # We need to fill in fake halos if there aren't enough halos,
             # which can happen at high redshifts.
-            while len(nIDs) < 5:
+            while len(nIDs) < NumNeighbors:
                 nIDs.append(-1)
             candidates[row[0]] = nIDs
         
@@ -405,12 +408,12 @@
         self.candidates = candidates
         
         # This stores the masses contributed to each child candidate.
-        self.child_mass_arr = na.zeros(len(candidates)*5, dtype='float64')
+        self.child_mass_arr = na.zeros(len(candidates)*NumNeighbors, dtype='float64')
         # Records where to put the entries in the above array.
         self.child_mass_loc = defaultdict(dict)
         for i,halo in enumerate(sorted(candidates)):
             for j, child in enumerate(candidates[halo]):
-                self.child_mass_loc[halo][child] = i*5 + j
+                self.child_mass_loc[halo][child] = i*NumNeighbors + j
 
     def _build_h5_refs(self, filename):
         # For this snapshot, add lists of file names that contain the
@@ -618,8 +621,8 @@
         result = self.cursor.fetchone()
         while result:
             mass = result[0]
-            self.child_mass_arr[mark:mark+5] /= mass
-            mark += 5
+            self.child_mass_arr[mark:mark+NumNeighbors] /= mass
+            mark += NumNeighbors
             result = self.cursor.fetchone()
         
         # Get the global ID for the SnapHaloID=0 from the child, this will
@@ -642,14 +645,15 @@
                 # We need to get the GlobalHaloID for this child.
                 child_globalID = baseChildID + child
                 child_indexes.append(child_globalID)
-                child_per.append(self.child_mass_arr[i*5 + j])
+                child_per.append(self.child_mass_arr[i*NumNeighbors + j])
             # Sort by percentages, desending.
             child_per, child_indexes = zip(*sorted(zip(child_per, child_indexes), reverse=True))
             values = []
-            for pair in zip(child_indexes, child_per):
+            for pair_count, pair in enumerate(zip(child_indexes, child_per)):
+                if pair_count == NumDB: break
                 values.extend([int(pair[0]), float(pair[1])])
             #values.extend([parent_currt, parent_halo])
-            # This has the child ID, child percent listed five times, followed
+            # This has the child ID, child percent listed NumDB times, followed
             # by the currt and this parent halo ID (SnapHaloID).
             #values = tuple(values)
             self.write_values.append(values)
@@ -841,7 +845,7 @@
          [1609, 0.0]]
         """
         parents = []
-        for i in range(5):
+        for i in range(NumDB):
             string = "SELECT GlobalHaloID, ChildHaloFrac%d FROM Halos\
             WHERE ChildHaloID%d=%d;" % (i, i, GlobalHaloID)
             self.cursor.execute(string)


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/analysis_modules/halo_profiler/api.py
--- a/yt/analysis_modules/halo_profiler/api.py
+++ b/yt/analysis_modules/halo_profiler/api.py
@@ -34,5 +34,5 @@
 from .multi_halo_profiler import \
     HaloProfiler, \
     FakeProfile, \
-    shift_projections, \
+    get_halo_sphere, \
     standard_fields


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -46,7 +46,8 @@
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface, \
     parallel_blocking_call, \
-    parallel_root_only
+    parallel_root_only, \
+    parallel_objects
 from yt.visualization.fixed_resolution import \
     FixedResolutionBuffer
 from yt.visualization.image_writer import write_image
@@ -66,7 +67,7 @@
                  recenter = None,
                  profile_output_dir='radial_profiles', projection_output_dir='projections',
                  projection_width=8.0, projection_width_units='mpc', project_at_level='max',
-                 velocity_center=['bulk', 'halo'], filter_quantities=['id','center'], 
+                 velocity_center=['bulk', 'halo'], filter_quantities=['id', 'center', 'r_max'], 
                  use_critical_density=False):
         r"""Initialize a Halo Profiler object.
         
@@ -184,7 +185,6 @@
         self._halo_filters = []
         self.all_halos = []
         self.filtered_halos = []
-        self._projection_halo_list = []
 
         # Create output directory if specified
         if self.output_dir is not None:
@@ -351,7 +351,8 @@
             
         """
 
-        self.profile_fields.append({'field':field, 'weight_field':weight_field, 'accumulation':accumulation})
+        self.profile_fields.append({'field':field, 'weight_field':weight_field, 
+                                    'accumulation':accumulation})
 
     def add_projection(self, field, weight_field=None, cmap='algae'):
         r"""Make a projection of the specified field.
@@ -453,7 +454,7 @@
 
         # Profile all halos.
         updated_halos = []
-        for halo in self._get_objs('all_halos', round_robin=True):
+        for halo in parallel_objects(self.all_halos, -1):
             # Apply prefilters to avoid profiling unwanted halos.
             filter_result = True
             haloQuantities = {}
@@ -509,7 +510,7 @@
 
     def _get_halo_profile(self, halo, filename, virial_filter=True,
             force_write=False):
-        """Profile a single halo and write profile data to a file.
+        r"""Profile a single halo and write profile data to a file.
         If file already exists, read profile data from file.
         Return a dictionary of id, center, and virial quantities if virial_filter is True.
         """
@@ -527,39 +528,9 @@
                 mylog.error("Skipping halo with r_max / r_min = %f." % (halo['r_max']/r_min))
                 return None
 
-            sphere = self.pf.h.sphere(halo['center'], halo['r_max']/self.pf.units['mpc'])
-            if len(sphere._grids) == 0: return None
-            new_sphere = False
-
-            if self.recenter:
-                old = halo['center']
-                if self.recenter in centering_registry:
-                    new_x, new_y, new_z = \
-                        centering_registry[self.recenter](sphere)
-                else:
-                    # user supplied function
-                    new_x, new_y, new_z = self.recenter(sphere)
-                if new_x < self.pf.domain_left_edge[0] or \
-                        new_y < self.pf.domain_left_edge[1] or \
-                        new_z < self.pf.domain_left_edge[2]:
-                    mylog.info("Recentering rejected, skipping halo %d" % \
-                        halo['id'])
-                    return None
-                halo['center'] = [new_x, new_y, new_z]
-                d = self.pf['kpc'] * periodic_dist(old, halo['center'],
-                    self.pf.domain_right_edge - self.pf.domain_left_edge)
-                mylog.info("Recentered halo %d %1.3e kpc away." % (halo['id'], d))
-                # Expand the halo to account for recentering. 
-                halo['r_max'] += d / 1000 # d is in kpc -> want mpc
-                new_sphere = True
-
-            if new_sphere:
-                # Temporary solution to memory leak.
-                for g in self.pf.h.grids:
-                    g.clear_data()
-                sphere.clear_data()
-                del sphere
-                sphere = self.pf.h.sphere(halo['center'], halo['r_max']/self.pf.units['mpc'])
+            # get a sphere object to profile
+            sphere = get_halo_sphere(halo, self.pf, recenter=self.recenter)
+            if sphere is None: return None
 
             if self._need_bulk_velocity:
                 # Set bulk velocity to zero out radial velocity profiles.
@@ -567,7 +538,9 @@
                     if self.velocity_center[1] == 'halo':
                         sphere.set_field_parameter('bulk_velocity', halo['velocity'])
                     elif self.velocity_center[1] == 'sphere':
-                        sphere.set_field_parameter('bulk_velocity', sphere.quantities['BulkVelocity'](lazy_reader=False, preload=False))
+                        sphere.set_field_parameter('bulk_velocity', 
+                                                   sphere.quantities['BulkVelocity'](lazy_reader=False, 
+                                                                                     preload=False))
                     else:
                         mylog.error("Invalid parameter: VelocityCenter.")
                 elif self.velocity_center[0] == 'max':
@@ -645,18 +618,18 @@
 
         # Get list of halos for projecting.
         if halo_list == 'filtered':
-            self._halo_projection_list = self.filtered_halos
+            halo_projection_list = self.filtered_halos
         elif halo_list == 'all':
-            self._halo_projection_list = self.all_halos
+            halo_projection_list = self.all_halos
         elif isinstance(halo_list, types.StringType):
-            self._halo_projection_list = self._read_halo_list(halo_list)
+            halo_projection_list = self._read_halo_list(halo_list)
         elif isinstance(halo_list, types.ListType):
-            self._halo_projection_list = halo_list
+            halo_projection_list = halo_list
         else:
             mylog.error("Keyword, halo_list', must be 'filtered', 'all', a filename, or an actual list.")
             return
 
-        if len(self._halo_projection_list) == 0:
+        if len(halo_projection_list) == 0:
             mylog.error("Halo list for projections is empty.")
             return
 
@@ -665,7 +638,8 @@
             proj_level = self.pf.h.max_level
         else:
             proj_level = int(self.project_at_level)
-        proj_dx = self.pf.units[self.projection_width_units] / self.pf.parameters['TopGridDimensions'][0] / \
+        proj_dx = self.pf.units[self.projection_width_units] / \
+            self.pf.parameters['TopGridDimensions'][0] / \
             (self.pf.parameters['RefineBy']**proj_level)
         projectionResolution = int(self.projection_width / proj_dx)
 
@@ -678,21 +652,25 @@
             my_output_dir = "%s/%s" % (self.pf.fullpath, self.projection_output_dir)
         self.__check_directory(my_output_dir)
 
-        center = [0.5 * (self.pf.parameters['DomainLeftEdge'][w] + self.pf.parameters['DomainRightEdge'][w])
+        center = [0.5 * (self.pf.parameters['DomainLeftEdge'][w] + 
+                         self.pf.parameters['DomainRightEdge'][w])
                   for w in range(self.pf.parameters['TopGridRank'])]
 
-        for halo in self._get_objs('_halo_projection_list', round_robin=True):
+        for halo in parallel_objects(halo_projection_list, -1):
             if halo is None:
                 continue
             # Check if region will overlap domain edge.
             # Using non-periodic regions is faster than using periodic ones.
-            leftEdge = [(halo['center'][w] - 0.5 * self.projection_width/self.pf.units[self.projection_width_units])
+            leftEdge = [(halo['center'][w] - 
+                         0.5 * self.projection_width/self.pf.units[self.projection_width_units])
                         for w in range(len(halo['center']))]
-            rightEdge = [(halo['center'][w] + 0.5 * self.projection_width/self.pf.units[self.projection_width_units])
+            rightEdge = [(halo['center'][w] + 
+                          0.5 * self.projection_width/self.pf.units[self.projection_width_units])
                          for w in range(len(halo['center']))]
 
             mylog.info("Projecting halo %04d in region: [%f, %f, %f] to [%f, %f, %f]." %
-                       (halo['id'], leftEdge[0], leftEdge[1], leftEdge[2], rightEdge[0], rightEdge[1], rightEdge[2]))
+                       (halo['id'], leftEdge[0], leftEdge[1], leftEdge[2], 
+                        rightEdge[0], rightEdge[1], rightEdge[2]))
 
             need_per = False
             for w in range(len(halo['center'])):
@@ -719,13 +697,13 @@
                 for hp in self.projection_fields:
                     projections.append(self.pf.h.proj(w, hp['field'], 
                                                       weight_field=hp['weight_field'], 
-                                                      data_source=region, center=halo['center'],
+                                                      source=region, center=halo['center'],
                                                       serialize=False))
                 
                 # Set x and y limits, shift image if it overlaps domain boundary.
                 if need_per:
                     pw = self.projection_width/self.pf.units[self.projection_width_units]
-                    #shift_projections(self.pf, projections, halo['center'], center, w)
+                    _shift_projections(self.pf, projections, halo['center'], center, w)
                     # Projection has now been shifted to center of box.
                     proj_left = [center[x_axis]-0.5*pw, center[y_axis]-0.5*pw]
                     proj_right = [center[x_axis]+0.5*pw, center[y_axis]+0.5*pw]
@@ -756,11 +734,85 @@
                         if save_images:
                             filename = "%s/Halo_%04d_%s_%s.png" % (my_output_dir, halo['id'], 
                                                                    dataset_name, axis_labels[w])
-                            write_image(na.log10(frb[hp['field']]), filename, cmap_name=hp['cmap'])
+                            if (frb[hp['field']] != 0).any():
+                                write_image(na.log10(frb[hp['field']]), filename, cmap_name=hp['cmap'])
+                            else:
+                                mylog.info('Projection of %s for halo %d is all zeros, skipping image.' %
+                                            (hp['field'], halo['id']))
                     if save_cube: output.close()
 
             del region
 
+    @parallel_blocking_call
+    def analyze_halo_spheres(self, analysis_function, halo_list='filtered',
+                             analysis_output_dir=None):
+        r"""Perform custom analysis on all halos.
+        
+        This will loop through all halos on the HaloProfiler's list,
+        creating a sphere object for each halo and passing that sphere 
+        to the provided analysis function.
+        
+        Parameters
+        ----------
+        analysis_function : function
+            A function taking two arguments, the halo dictionary, and a 
+            sphere object.
+            Example function to calculate total mass of halo:
+                def my_analysis(halo, sphere):
+                    total_mass = sphere.quantities['TotalMass']()
+                    print total_mass
+        halo_list : {'filtered', 'all'}
+            Which set of halos to run the analysis on, either the ones passed
+            by the halo filters (if enabled/added) or all halos.
+            Default='filtered'.
+        analysis_output_dir : string, optional
+            If specified, this directory will be created within the dataset
+            directory to hold any output from the analysis function.
+            Default: None.
+
+        Examples
+        --------
+        >>> hp.analyze_halo_spheres(my_analysis, halo_list="filtered",
+        ...                         analysis_output_dir='special_analysis')
+        
+        """
+
+        # Get list of halos for projecting.
+        if halo_list == 'filtered':
+            halo_analysis_list = self.filtered_halos
+        elif halo_list == 'all':
+            halo_analysis_list = self.all_halos
+        elif isinstance(halo_list, types.StringType):
+            halo_analysis_list = self._read_halo_list(halo_list)
+        elif isinstance(halo_list, types.ListType):
+            halo_analysis_list = halo_list
+        else:
+            mylog.error("Keyword, halo_list', must be 'filtered', 'all', a filename, or an actual list.")
+            return
+
+        if len(halo_analysis_list) == 0:
+            mylog.error("Halo list for analysis is empty.")
+            return
+
+        # Create output directory.
+        if analysis_output_dir is not None:
+            if self.output_dir is not None:
+                self.__check_directory("%s/%s" % (self.output_dir, self.pf.directory))
+                my_output_dir = "%s/%s/%s" % (self.output_dir, self.pf.directory, 
+                                              analysis_output_dir)
+            else:
+                my_output_dir = "%s/%s" % (self.pf.fullpath, analysis_output_dir)
+            self.__check_directory(my_output_dir)
+
+        for halo in parallel_objects(halo_analysis_list, -1):
+            if halo is None: continue
+
+            # Get a sphere object to analyze.
+            sphere = get_halo_sphere(halo, self.pf, recenter=self.recenter)
+            if sphere is None: continue
+
+            # Call the given analysis function.
+            analysis_function(halo, sphere)
+
     def _add_actual_overdensity(self, profile):
         "Calculate overdensity from TotalMassMsun and CellVolume fields."
 
@@ -917,7 +969,8 @@
     def _run_hop(self, hop_file):
         "Run hop to get halos."
 
-        hop_results = self.halo_finder_function(self.pf, *self.halo_finder_args, **self.halo_finder_kwargs)
+        hop_results = self.halo_finder_function(self.pf, *self.halo_finder_args, 
+                                                **self.halo_finder_kwargs)
         hop_results.write_out(hop_file)
 
         del hop_results
@@ -989,7 +1042,95 @@
         else:
             os.mkdir(my_output_dir)
 
-def shift_projections(pf, projections, oldCenter, newCenter, axis):
+def get_halo_sphere(halo, pf, recenter=None):
+    r"""Returns a sphere object for a given halo.
+        
+    With a dictionary containing halo properties, such as center 
+    and r_max, this creates a sphere object and optionally 
+    recenters and recreates the sphere using a recentering function.
+    This is to be used primarily to make spheres for a set of halos 
+    loaded by the HaloProfiler.
+    
+    Parameters
+    ----------
+    halo : dict, required
+        The dictionary containing halo properties used to make the sphere.
+        Required entries:
+            center : list with center coordinates.
+            r_max : sphere radius in Mpc.
+    pf : parameter file object, required
+        The parameter file from which the sphere will be made.
+    recenter : {None, string or function}
+        The exact location of the sphere center can significantly affect 
+        radial profiles.  The halo center loaded by the HaloProfiler will 
+        typically be the dark matter center of mass calculated by a halo 
+        finder.  However, this may not be the best location for centering 
+        profiles of baryon quantities.  For example, one may want to center 
+        on the maximum density.
+        If recenter is given as a string, one of the existing recentering 
+        functions will be used:
+            Min_Dark_Matter_Density : location of minimum dark matter density
+            Max_Dark_Matter_Density : location of maximum dark matter density
+            CoM_Dark_Matter_Density : dark matter center of mass
+            Min_Gas_Density : location of minimum gas density
+            Max_Gas_Density : location of maximum gas density
+            CoM_Gas_Density : gas center of mass
+            Min_Total_Density : location of minimum total density
+            Max_Total_Density : location of maximum total density
+            CoM_Total_Density : total center of mass
+            Min_Temperature : location of minimum temperature
+            Max_Temperature : location of maximum temperature
+        Alternately, a function can be supplied for custom recentering.
+        The function should take only one argument, a sphere object.
+            Example function:
+                def my_center_of_mass(data):
+                   my_x, my_y, my_z = data.quantities['CenterOfMass']()
+                   return (my_x, my_y, my_z)
+
+    Examples
+    --------
+    This should primarily be used with the halo list of the HaloProfiler.
+    This example uses an abstract halo, assuming a pre-defined pf.
+    >>> halo = {'center': [0.5, 0.5, 0.5], 'r_max': 1.0}
+    >>> my_sphere = get_halo_sphere(halo, pf, recenter='Max_Gas_Density')
+    >>> # Assuming the above example function has been defined.
+    >>> my_sphere = get_halo_sphere(halo, pf, recenter=my_center_of_mass)
+    """
+        
+    sphere = pf.h.sphere(halo['center'], halo['r_max']/pf.units['mpc'])
+    if len(sphere._grids) == 0: return None
+    new_sphere = False
+
+    if recenter:
+        old = halo['center']
+        if recenter in centering_registry:
+            new_x, new_y, new_z = \
+                centering_registry[recenter](sphere)
+        else:
+            # user supplied function
+            new_x, new_y, new_z = recenter(sphere)
+        if new_x < pf.domain_left_edge[0] or \
+                new_y < pf.domain_left_edge[1] or \
+                new_z < pf.domain_left_edge[2]:
+            mylog.info("Recentering rejected, skipping halo %d" % \
+                halo['id'])
+            return None
+        halo['center'] = [new_x, new_y, new_z]
+        d = pf['kpc'] * periodic_dist(old, halo['center'],
+            pf.domain_right_edge - pf.domain_left_edge)
+        mylog.info("Recentered halo %d %1.3e kpc away." % (halo['id'], d))
+        # Expand the halo to account for recentering. 
+        halo['r_max'] += d / 1000 # d is in kpc -> want mpc
+        new_sphere = True
+
+    if new_sphere:
+        # Temporary solution to memory leak.
+        for g in pf.h.grids:
+            g.clear_data()
+        sphere.clear_data()
+        del sphere
+        sphere = pf.h.sphere(halo['center'], halo['r_max']/pf.units['mpc'])
+    return sphere
+
+def _shift_projections(pf, projections, oldCenter, newCenter, axis):
     """
     Shift projection data around.
     This is necessary when projecting a periodic region.
@@ -1059,14 +1200,19 @@
         add2_y_weight_field = plot['weight_field'][plot['py'] - 0.5 * plot['pdy'] < 0]
 
         # Add the hanging cells back to the projection data.
-        plot.field_data['px'] = na.concatenate([plot['px'], add_x_px, add_y_px, add2_x_px, add2_y_px])
-        plot.field_data['py'] = na.concatenate([plot['py'], add_x_py, add_y_py, add2_x_py, add2_y_py])
-        plot.field_data['pdx'] = na.concatenate([plot['pdx'], add_x_pdx, add_y_pdx, add2_x_pdx, add2_y_pdx])
-        plot.field_data['pdy'] = na.concatenate([plot['pdy'], add_x_pdy, add_y_pdy, add2_x_pdy, add2_y_pdy])
-        plot.field_data[field] = na.concatenate([plot[field], add_x_field, add_y_field, add2_x_field, add2_y_field])
+        plot.field_data['px'] = na.concatenate([plot['px'], add_x_px, add_y_px, 
+                                                add2_x_px, add2_y_px])
+        plot.field_data['py'] = na.concatenate([plot['py'], add_x_py, add_y_py, 
+                                                add2_x_py, add2_y_py])
+        plot.field_data['pdx'] = na.concatenate([plot['pdx'], add_x_pdx, add_y_pdx, 
+                                                 add2_x_pdx, add2_y_pdx])
+        plot.field_data['pdy'] = na.concatenate([plot['pdy'], add_x_pdy, add_y_pdy, 
+                                                 add2_x_pdy, add2_y_pdy])
+        plot.field_data[field] = na.concatenate([plot[field], add_x_field, add_y_field, 
+                                                 add2_x_field, add2_y_field])
         plot.field_data['weight_field'] = na.concatenate([plot['weight_field'],
-                                                    add_x_weight_field, add_y_weight_field, 
-                                                    add2_x_weight_field, add2_y_weight_field])
+                                                          add_x_weight_field, add_y_weight_field, 
+                                                          add2_x_weight_field, add2_y_weight_field])
 
         # Delete original copies of hanging cells.
         del add_x_px, add_y_px, add2_x_px, add2_y_px
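
For reference, the two additions above (get_halo_sphere and analyze_halo_spheres) are meant to be used together.  A minimal usage sketch along these lines should work; the dataset name is a placeholder and the HaloProfiler is assumed to run its default halo finder:

    from yt.mods import *
    from yt.analysis_modules.halo_profiler.api import *

    hp = HaloProfiler("DD0010/DD0010")  # placeholder dataset

    def my_analysis(halo, sphere):
        # halo is the property dictionary; sphere comes from get_halo_sphere.
        total_mass = sphere.quantities['TotalMass']()
        print halo['id'], total_mass

    hp.analyze_halo_spheres(my_analysis, halo_list='all',
                            analysis_output_dir='special_analysis')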


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/analysis_modules/star_analysis/sfr_spectrum.py
--- a/yt/analysis_modules/star_analysis/sfr_spectrum.py
+++ b/yt/analysis_modules/star_analysis/sfr_spectrum.py
@@ -96,6 +96,8 @@
             self._pf.current_redshift) # seconds
         # Build the distribution.
         self.build_dist()
+        # Attach some convenience arrays.
+        self.attach_arrays()
 
     def build_dist(self):
         """
@@ -127,6 +129,47 @@
         # We will want the time taken between bins.
         self.time_bins_dt = self.time_bins[1:] - self.time_bins[:-1]
     
+    def attach_arrays(self):
+        """
+        Attach convenience arrays to the class for easy access.
+        """
+        if self.mode == 'data_source':
+            try:
+                vol = self._data_source.volume('mpc')
+            except AttributeError:
+                # If we're here, this is probably a HOPHalo object, and we
+                # can get the volume this way.
+                ds = self._data_source.get_sphere()
+                vol = ds.volume('mpc')
+        elif self.mode == 'provided':
+            vol = self.volume
+        tc = self._pf["Time"]
+        self.time = []
+        self.lookback_time = []
+        self.redshift = []
+        self.Msol_yr = []
+        self.Msol_yr_vol = []
+        self.Msol = []
+        self.Msol_cumulative = []
+        # Use the center of the time_bin, not the left edge.
+        for i, time in enumerate((self.time_bins[1:] + self.time_bins[:-1])/2.):
+            self.time.append(time * tc / YEAR)
+            self.lookback_time.append((self.time_now - time * tc)/YEAR)
+            self.redshift.append(self.cosm.ComputeRedshiftFromTime(time * tc))
+            self.Msol_yr.append(self.mass_bins[i] / \
+                (self.time_bins_dt[i] * tc / YEAR))
+            self.Msol_yr_vol.append(self.mass_bins[i] / \
+                (self.time_bins_dt[i] * tc / YEAR) / vol)
+            self.Msol.append(self.mass_bins[i])
+            self.Msol_cumulative.append(self.cum_mass_bins[i])
+        self.time = na.array(self.time)
+        self.lookback_time = na.array(self.lookback_time)
+        self.redshift = na.array(self.redshift)
+        self.Msol_yr = na.array(self.Msol_yr)
+        self.Msol_yr_vol = na.array(self.Msol_yr_vol)
+        self.Msol = na.array(self.Msol)
+        self.Msol_cumulative = na.array(self.Msol_cumulative)
+    
     def write_out(self, name="StarFormationRate.out"):
         r"""Write out the star analysis to a text file *name*. The columns are in
         order.
@@ -150,31 +193,21 @@
         >>> sfr.write_out("stars-SFR.out")
         """
         fp = open(name, "w")
-        if self.mode == 'data_source':
-            try:
-                vol = self._data_source.volume('mpc')
-            except AttributeError:
-                # If we're here, this is probably a HOPHalo object, and we
-                # can get the volume this way.
-                ds = self._data_source.get_sphere()
-                vol = ds.volume('mpc')
-        elif self.mode == 'provided':
-            vol = self.volume
-        tc = self._pf["Time"]
-        # Use the center of the time_bin, not the left edge.
         fp.write("#time\tlookback\tredshift\tMsol/yr\tMsol/yr/Mpc3\tMsol\tcumMsol\t\n")
-        for i, time in enumerate((self.time_bins[1:] + self.time_bins[:-1])/2.):
+        for i, time in enumerate(self.time):
             line = "%1.5e %1.5e %1.5e %1.5e %1.5e %1.5e %1.5e\n" % \
-            (time * tc / YEAR, # Time
-            (self.time_now - time * tc)/YEAR, # Lookback time
-            self.cosm.ComputeRedshiftFromTime(time * tc), # Redshift
-            self.mass_bins[i] / (self.time_bins_dt[i] * tc / YEAR), # Msol/yr
-            self.mass_bins[i] / (self.time_bins_dt[i] * tc / YEAR) / vol, # Msol/yr/vol
-            self.mass_bins[i], # Msol in bin
-            self.cum_mass_bins[i]) # cumulative
+            (time, # Time
+            self.lookback_time[i], # Lookback time
+            self.redshift[i], # Redshift
+            self.Msol_yr[i], # Msol/yr
+            self.Msol_yr_vol[i], # Msol/yr/vol
+            self.Msol[i], # Msol in bin
+            self.Msol_cumulative[i]) # cumulative
             fp.write(line)
         fp.close()
 
+#### Begin Synthetic Spectrum Stuff. ####
+
 CHABRIER = {
 "Z0001" : "bc2003_hr_m22_chab_ssp.ised.h5", #/* 0.5% */
 "Z0004" : "bc2003_hr_m32_chab_ssp.ised.h5", #/* 2% */


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -42,6 +42,7 @@
     __global_parallel_size = '1',
     __topcomm_parallel_rank = '0',
     __topcomm_parallel_size = '1',
+    __command_line = 'False',
     storeparameterfiles = 'False',
     parameterfilestore = 'parameter_files.csv',
     maximumstoredpfs = '500',


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -54,6 +54,8 @@
     TrilinearFieldInterpolator
 from yt.utilities.parameter_file_storage import \
     ParameterFileStore
+from yt.utilities.minimal_representation import \
+    MinimalProjectionData
 
 from .derived_quantities import DerivedQuantityCollection
 from .field_info_container import \
@@ -89,6 +91,20 @@
         return tr
     return save_state
 
+def restore_field_information_state(func):
+    """
+    A decorator that takes a function with the API of (self, grid, field)
+    and ensures that after the function is called, the field_parameters will
+    be returned to normal.
+    """
+    def save_state(self, grid, field=None, *args, **kwargs):
+        old_params = grid.field_parameters
+        grid.field_parameters = self.field_parameters
+        tr = func(self, grid, field, *args, **kwargs)
+        grid.field_parameters = old_params
+        return tr
+    return save_state
+
 def cache_mask(func):
     """
     For computationally intensive indexing operations, we can cache
@@ -212,7 +228,7 @@
         self._point_indices = {}
         self._vc_data = {}
         for key, val in kwargs.items():
-            mylog.info("Setting %s to %s", key, val)
+            mylog.debug("Setting %s to %s", key, val)
             self.set_field_parameter(key, val)
 
     def __set_default_field_parameters(self):
@@ -382,9 +398,10 @@
                      [self.field_parameters])
         return (_reconstruct_object, args)
 
-    def __repr__(self):
+    def __repr__(self, clean = False):
         # We'll do this the slow way to be clear what's going on
-        s = "%s (%s): " % (self.__class__.__name__, self.pf)
+        if clean: s = "%s: " % (self.__class__.__name__)
+        else: s = "%s (%s): " % (self.__class__.__name__, self.pf)
         s += ", ".join(["%s=%s" % (i, getattr(self,i))
                        for i in self._con_args])
         return s
@@ -811,6 +828,38 @@
             self[field] = temp_data[field]
 
     def to_frb(self, width, resolution, center = None):
+        r"""This function returns a FixedResolutionBuffer generated from this
+        object.
+
+        A FixedResolutionBuffer is an object that accepts a variable-resolution
+        2D object and transforms it into an NxM bitmap that can be plotted,
+        examined or processed.  This is a convenience function to return an FRB
+        directly from an existing 2D data object.
+
+        Parameters
+        ----------
+        width : width specifier
+            This can either be a floating point value, in the native domain
+            units of the simulation, or a tuple of the (value, unit) style.
+            This will be the width of the FRB.
+        resolution : int or tuple of ints
+            The number of pixels on a side of the final FRB.
+        center : array-like of floats, optional
+            The center of the FRB.  If not specified, defaults to the center of
+            the current object.
+
+        Returns
+        -------
+        frb : :class:`~yt.visualization.fixed_resolution.FixedResolutionBuffer`
+            A fixed resolution buffer, which can be queried for fields.
+
+        Examples
+        --------
+
+        >>> proj = pf.h.proj(0, "Density")
+        >>> frb = proj.to_frb( (100.0, 'kpc'), 1024)
+        >>> write_image(na.log10(frb["Density"]), 'density_100kpc.png')
+        """
         if center is None:
             center = self.get_field_parameter("center")
             if center is None:
@@ -1221,6 +1270,52 @@
         return "%s/c%s_L%s" % \
             (self._top_node, cen_name, L_name)
 
+    def to_frb(self, width, resolution):
+        r"""This function returns an ObliqueFixedResolutionBuffer generated
+        from this object.
+
+        An ObliqueFixedResolutionBuffer is an object that accepts a
+        variable-resolution 2D object and transforms it into an NxM bitmap that
+        can be plotted, examined or processed.  This is a convenience function
+        to return an FRB directly from an existing 2D data object.  Unlike the
+        corresponding to_frb function for other AMR2DData objects, this does
+        not accept a 'center' parameter as it is assumed to be centered at the
+        center of the cutting plane.
+
+        Parameters
+        ----------
+        width : width specifier
+            This can either be a floating point value, in the native domain
+            units of the simulation, or a tuple of the (value, unit) style.
+            This will be the width of the FRB.
+        resolution : int or tuple of ints
+            The number of pixels on a side of the final FRB.
+
+        Returns
+        -------
+        frb : :class:`~yt.visualization.fixed_resolution.ObliqueFixedResolutionBuffer`
+            A fixed resolution buffer, which can be queried for fields.
+
+        Examples
+        --------
+
+        >>> v, c = pf.h.find_max("Density")
+        >>> sp = pf.h.sphere(c, (100.0, 'au'))
+        >>> L = sp.quantities["AngularMomentumVector"]()
+        >>> cutting = pf.h.cutting(L, c)
+        >>> frb = cutting.to_frb( (1.0, 'pc'), 1024)
+        >>> write_image(na.log10(frb["Density"]), 'density_1pc.png')
+        """
+        if iterable(width):
+            w, u = width
+            width = w/self.pf[u]
+        if not iterable(resolution):
+            resolution = (resolution, resolution)
+        from yt.visualization.fixed_resolution import ObliqueFixedResolutionBuffer
+        bounds = (-width/2.0, width/2.0, -width/2.0, width/2.0)
+        frb = ObliqueFixedResolutionBuffer(self, bounds, resolution)
+        return frb
+
 class AMRFixedResCuttingPlaneBase(AMR2DData):
     """
     AMRFixedResCuttingPlaneBase is an oblique plane through the data,
@@ -1516,6 +1611,10 @@
         self._refresh_data()
         if self._okay_to_serialize and self.serialize: self._serialize(node_name=self._node_name)
 
+    @property
+    def _mrep(self):
+        return MinimalProjectionData(self)
+
     def _convert_field_name(self, field):
         if field == "weight_field": return "weight_field_%s" % self._weight
         if field in self._key_fields: return field
@@ -2443,14 +2542,8 @@
         verts = []
         samples = []
         for i, g in enumerate(self._get_grid_objs()):
-            mask = self._get_cut_mask(g) * g.child_mask
-            vals = g.get_vertex_centered_data(field)
-            if sample_values is not None:
-                svals = g.get_vertex_centered_data(sample_values)
-            else:
-                svals = None
-            my_verts = march_cubes_grid(value, vals, mask, g.LeftEdge, g.dds,
-                                        svals)
+            my_verts = self._extract_isocontours_from_grid(
+                            g, field, value, sample_values)
             if sample_values is not None:
                 my_verts, svals = my_verts
                 samples.append(svals)
@@ -2477,6 +2570,20 @@
             return verts, samples
         return verts
 
+
+    @restore_grid_state
+    def _extract_isocontours_from_grid(self, grid, field, value,
+                                       sample_values = None):
+        mask = self._get_cut_mask(grid) * grid.child_mask
+        vals = grid.get_vertex_centered_data(field)
+        if sample_values is not None:
+            svals = grid.get_vertex_centered_data(sample_values)
+        else:
+            svals = None
+        my_verts = march_cubes_grid(value, vals, mask, grid.LeftEdge,
+                                    grid.dds, svals)
+        return my_verts
+
     def calculate_isocontour_flux(self, field, value,
                     field_x, field_y, field_z, fluxing_field = None):
         r"""This identifies isocontours on a cell-by-cell basis, with no
@@ -2543,19 +2650,25 @@
         """
         flux = 0.0
         for g in self._get_grid_objs():
-            mask = self._get_cut_mask(g) * g.child_mask
-            vals = g.get_vertex_centered_data(field)
-            if fluxing_field is None:
-                ff = na.ones(vals.shape, dtype="float64")
-            else:
-                ff = g.get_vertex_centered_data(fluxing_field)
-            xv, yv, zv = [g.get_vertex_centered_data(f) for f in 
-                         [field_x, field_y, field_z]]
-            flux += march_cubes_grid_flux(value, vals, xv, yv, zv,
-                        ff, mask, g.LeftEdge, g.dds)
+            flux += self._calculate_flux_in_grid(g, field, value,
+                    field_x, field_y, field_z, fluxing_field)
         flux = self.comm.mpi_allreduce(flux, op="sum")
         return flux
 
+    @restore_grid_state
+    def _calculate_flux_in_grid(self, grid, field, value,
+                    field_x, field_y, field_z, fluxing_field = None):
+        mask = self._get_cut_mask(grid) * grid.child_mask
+        vals = grid.get_vertex_centered_data(field)
+        if fluxing_field is None:
+            ff = na.ones(vals.shape, dtype="float64")
+        else:
+            ff = grid.get_vertex_centered_data(fluxing_field)
+        xv, yv, zv = [grid.get_vertex_centered_data(f) for f in 
+                     [field_x, field_y, field_z]]
+        return march_cubes_grid_flux(value, vals, xv, yv, zv,
+                    ff, mask, grid.LeftEdge, grid.dds)
+
     def extract_connected_sets(self, field, num_levels, min_val, max_val,
                                 log_space=True, cumulative=True, cache=False):
         """
@@ -2855,12 +2968,6 @@
                  & (r <= self._radius))
         return cm
 
-    def volume(self, unit="unitary"):
-        """
-        Return the volume of the cylinder in units of *unit*.
-        """
-        return math.pi * (self._radius)**2. * self._height * pf[unit]**3
-
 class AMRInclinedBox(AMR3DData):
     _type_name="inclined_box"
     _con_args = ('origin','box_vectors')
@@ -3430,7 +3537,7 @@
                                    output_field, output_left)
             self.field_data[field] = output_field
 
-    @restore_grid_state
+    @restore_field_information_state
     def _get_data_from_grid(self, grid, fields):
         fields = ensure_list(fields)
         g_fields = [grid[field].astype("float64") for field in fields]
@@ -3523,6 +3630,19 @@
                     self._some_overlap.append(grid)
                     continue
     
+    def __repr__(self):
+        # We'll do this the slow way to be clear what's going on
+        s = "%s (%s): " % (self.__class__.__name__, self.pf)
+        s += "["
+        for i, region in enumerate(self.regions):
+            if region in ["OR", "AND", "NOT", "(", ")"]:
+                s += region
+            else:
+                s += region.__repr__(clean = True)
+            if i < (len(self.regions) - 1): s += ", "
+        s += "]"
+        return s
+    
     def _is_fully_enclosed(self, grid):
         return (grid in self._all_overlap)
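
The restore_field_information_state decorator added above follows the same save-and-restore idiom as the existing restore_grid_state.  Stripped of yt specifics, the pattern looks roughly like this (a schematic, not code from this changeset):

    def restore_attribute(attr):
        # Swap an attribute onto the grid for the duration of the call,
        # then put the original value back, even if the call raises.
        def decorator(func):
            def wrapper(self, grid, *args, **kwargs):
                saved = getattr(grid, attr)
                setattr(grid, attr, getattr(self, attr))
                try:
                    return func(self, grid, *args, **kwargs)
                finally:
                    setattr(grid, attr, saved)
            return wrapper
        return decorator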
 


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -133,7 +133,6 @@
             if weight:
                 f[u] /= w[u]
             self[field] = f
-        self["myweight"] = w
         self["UsedBins"] = u
 
     def add_fields(self, fields, weight = "CellMassMsun", accumulation = False, fractional=False):


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -37,6 +37,8 @@
     output_type_registry
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, NullFunc
+from yt.utilities.minimal_representation import \
+    MinimalStaticOutput
 
 # We want to support the movie format in the future.
 # When such a thing comes to pass, I'll move all the stuff that is contant up
@@ -115,6 +117,10 @@
         except ImportError:
             return s.replace(";", "*")
 
+    @property
+    def _mrep(self):
+        return MinimalStaticOutput(self)
+
     @classmethod
     def _is_valid(cls, *args, **kwargs):
         return False


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -882,6 +882,8 @@
 def _convertVorticitySquared(data):
     return data.convert("cm")**-2.0
 add_field("VorticitySquared", function=_VorticitySquared,
-          validators=[ValidateSpatial(1)],
+          validators=[ValidateSpatial(1,
+              ["x-velocity","y-velocity","z-velocity"])],
           units=r"\rm{s}^{-2}",
           convert_function=_convertVorticitySquared)
+


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -484,6 +484,15 @@
         if self.num_grids > 40:
             starter = na.random.randint(0, 20)
             random_sample = na.mgrid[starter:len(self.grids)-1:20j].astype("int32")
+            # We also add in a bit to make sure that some of the grids have
+            # particles
+            gwp = self.grid_particle_count > 0
+            if na.any(gwp) and not na.any(gwp[(random_sample,)]):
+                # We just add one grid.  This is not terribly efficient.
+                first_grid = na.where(gwp)[0][0]
+                random_sample.resize((21,))
+                random_sample[-1] = first_grid
+                mylog.debug("Added additional grid %s", first_grid)
             mylog.debug("Checking grids: %s", random_sample.tolist())
         else:
             random_sample = na.mgrid[0:max(len(self.grids)-1,1)].astype("int32")


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -296,11 +296,12 @@
 def _dmpdensity(field, data):
     blank = na.zeros(data.ActiveDimensions, dtype='float32')
     if data.NumberOfParticles == 0: return blank
-    if 'creation_time' in data.keys():
+    if 'creation_time' in data.pf.field_info:
         filter = data['creation_time'] <= 0.0
         if not filter.any(): return blank
     else:
         filter = na.ones(data.NumberOfParticles, dtype='bool')
+    if not filter.any(): return blank
     amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(na.float64),
                            data["particle_position_y"][filter].astype(na.float64),
                            data["particle_position_z"][filter].astype(na.float64),


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -316,6 +316,13 @@
             self.current_time = \
                 float(self._find_parameter("real", "time", scalar=True))
 
+        if self._flash_version == 7:
+            self.parameters['timestep'] = float(
+                self._handle["simulation parameters"]["timestep"])
+        else:
+            self.parameters['timestep'] = \
+                float(self._find_parameter("real", "dt", scalar=True))
+
         try:
             use_cosmo = self._find_parameter("logical", "usecosmology") 
         except:


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/frontends/gdf/api.py
--- a/yt/frontends/gdf/api.py
+++ b/yt/frontends/gdf/api.py
@@ -1,13 +1,14 @@
 """
-API for yt.frontends.chombo
+API for yt.frontends.gdf
 
+Author: Samuel W. Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
 Author: Matthew Turk <matthewturk at gmail.com>
 Affiliation: UCSD
 Author: J.S. Oishi <jsoishi at gmail.com>
 Affiliation: KIPAC/SLAC/Stanford
 Author: Britton Smith <brittonsmith at gmail.com>
 Affiliation: MSU
-Homepage: http://yt.Chombotools.org/
 License:
   Copyright (C) 2010-2011 Matthew Turk.  All Rights Reserved.
 


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -1,12 +1,15 @@
 """
-Data structures for Chombo.
+Data structures for GDF.
 
+Author: Samuel W. Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
 Author: Matthew Turk <matthewturk at gmail.com>
 Author: J. S. Oishi <jsoishi at gmail.com>
 Affiliation: KIPAC/SLAC/Stanford
 Homepage: http://yt-project.org/
 License:
-  Copyright (C) 2008-2011 Matthew Turk, J. S. Oishi.  All Rights Reserved.
+  Copyright (C) 2008-2011 Samuel W. Skillman, Matthew Turk, J. S. Oishi.  
+  All Rights Reserved.
 
   This file is part of yt.
 
@@ -76,7 +79,7 @@
         # for now, the hierarchy file is the parameter file!
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.directory = os.path.dirname(self.hierarchy_filename)
-        self._fhandle = h5py.File(self.hierarchy_filename)
+        self._fhandle = h5py.File(self.hierarchy_filename,'r')
         AMRHierarchy.__init__(self,pf,data_style)
 
         self._fhandle.close()
@@ -94,31 +97,31 @@
 
     def _count_grids(self):
         self.num_grids = self._fhandle['/grid_parent_id'].shape[0]
-        
+       
     def _parse_hierarchy(self):
         f = self._fhandle 
-        
-        # this relies on the first Group in the H5 file being
-        # 'Chombo_global'
-        levels = f.listnames()[1:]
         dxs=[]
         self.grids = na.empty(self.num_grids, dtype='object')
-        for i, grid in enumerate(f['data'].keys()):
-            self.grids[i] = self.grid(i, self, f['grid_level'][i],
-                                      f['grid_left_index'][i],
-                                      f['grid_dimensions'][i])
-            self.grids[i]._level_id = f['grid_level'][i]
+        levels = (f['grid_level'][:]).copy()
+        glis = (f['grid_left_index'][:]).copy()
+        gdims = (f['grid_dimensions'][:]).copy()
+        for i in range(levels.shape[0]):
+            self.grids[i] = self.grid(i, self, levels[i],
+                                      glis[i],
+                                      gdims[i])
+            self.grids[i]._level_id = levels[i]
 
             dx = (self.parameter_file.domain_right_edge-
                   self.parameter_file.domain_left_edge)/self.parameter_file.domain_dimensions
-            dx = dx/self.parameter_file.refine_by**(f['grid_level'][i])
+            dx = dx/self.parameter_file.refine_by**(levels[i])
             dxs.append(dx)
         dx = na.array(dxs)
-        self.grid_left_edge = self.parameter_file.domain_left_edge + dx*f['grid_left_index'][:]
-        self.grid_dimensions = f['grid_dimensions'][:].astype("int32")
+        self.grid_left_edge = self.parameter_file.domain_left_edge + dx*glis
+        self.grid_dimensions = gdims.astype("int32")
         self.grid_right_edge = self.grid_left_edge + dx*self.grid_dimensions
         self.grid_particle_count = f['grid_particle_count'][:]
-
+        del levels, glis, gdims
+ 
     def _populate_grid_objects(self):
         for g in self.grids:
             g._prepare_grid()
@@ -130,9 +133,6 @@
                 g1.Parent.append(g)
         self.max_level = self.grid_levels.max()
 
-    def _setup_unknown_fields(self):
-        pass
-
     def _setup_derived_fields(self):
         self.derived_field_list = []
 
@@ -171,7 +171,11 @@
         # This should be improved.
         self._handle = h5py.File(self.parameter_filename, "r")
         for field_name in self._handle["/field_types"]:
-            self.units[field_name] = self._handle["/field_types/%s" % field_name].attrs['field_to_cgs']
+            try:
+                self.units[field_name] = self._handle["/field_types/%s" % field_name].attrs['field_to_cgs']
+            except:
+                self.units[field_name] = 1.0
+
         self._handle.close()
         del self._handle
         
@@ -181,7 +185,9 @@
         self.domain_left_edge = sp["domain_left_edge"][:]
         self.domain_right_edge = sp["domain_right_edge"][:]
         self.domain_dimensions = sp["domain_dimensions"][:]
-        self.refine_by = sp["refine_by"]
+        refine_by = sp["refine_by"]
+        if refine_by is None: refine_by = 2
+        self.refine_by = refine_by 
         self.dimensionality = sp["dimensionality"]
         self.current_time = sp["current_time"]
         self.unique_identifier = sp["unique_identifier"]
@@ -198,6 +204,7 @@
         else:
             self.current_redshift = self.omega_lambda = self.omega_matter = \
                 self.hubble_constant = self.cosmological_simulation = 0.0
+        self.parameters['Time'] = 1.0 # Hardcode time conversion for now.
         self.parameters["HydroMethod"] = 0 # Hardcode for now until field staggering is supported.
         self._handle.close()
         del self._handle


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/frontends/gdf/fields.py
--- a/yt/frontends/gdf/fields.py
+++ b/yt/frontends/gdf/fields.py
@@ -1,11 +1,14 @@
 """
 GDF-specific fields
 
+Author: Samuel W. Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
 Author: J. S. Oishi <jsoishi at gmail.com>
 Affiliation: KIPAC/SLAC/Stanford
 Homepage: http://yt-project.org/
 License:
-  Copyright (C) 2009-2011 J. S. Oishi, Matthew Turk.  All Rights Reserved.
+  Copyright (C) 2008-2011 Samuel W. Skillman, Matthew Turk, J. S. Oishi.  
+  All Rights Reserved.
 
   This file is part of yt.
 
@@ -53,40 +56,31 @@
 add_gdf_field = KnownGDFFields.add_field
 
 add_gdf_field("density", function=NullFunc, take_log=True,
-          validators = [ValidateDataField("density")],
           units=r"\rm{g}/\rm{cm}^3",
           projected_units =r"\rm{g}/\rm{cm}^2")
 
 add_gdf_field("specific_energy", function=NullFunc, take_log=True,
-          validators = [ValidateDataField("specific_energy")],
           units=r"\rm{erg}/\rm{g}")
 
 add_gdf_field("pressure", function=NullFunc, take_log=True,
-          validators = [ValidateDataField("pressure")],
           units=r"\rm{erg}/\rm{g}")
 
-add_gdf_field("velocity_x", function=NullFunc, take_log=True,
-          validators = [ValidateDataField("velocity_x")],
+add_gdf_field("velocity_x", function=NullFunc, take_log=False,
           units=r"\rm{cm}/\rm{s}")
 
 add_gdf_field("velocity_y", function=NullFunc, take_log=False,
-          validators = [ValidateDataField("velocity_y")],
           units=r"\rm{cm}/\rm{s}")
 
 add_gdf_field("velocity_z", function=NullFunc, take_log=False,
-          validators = [ValidateDataField("velocity_z")],
           units=r"\rm{cm}/\rm{s}")
 
 add_gdf_field("mag_field_x", function=NullFunc, take_log=False,
-          validators = [ValidateDataField("mag_field_x")],
           units=r"\rm{cm}/\rm{s}")
 
 add_gdf_field("mag_field_y", function=NullFunc, take_log=False,
-          validators = [ValidateDataField("mag_field_y")],
           units=r"\rm{cm}/\rm{s}")
 
 add_gdf_field("mag_field_z", function=NullFunc, take_log=False,
-          validators = [ValidateDataField("mag_field_z")],
           units=r"\rm{cm}/\rm{s}")
 
 for f,v in log_translation_dict.items():


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/frontends/gdf/io.py
--- a/yt/frontends/gdf/io.py
+++ b/yt/frontends/gdf/io.py
@@ -1,6 +1,8 @@
 """
 The data-file handling functions
 
+Author: Samuel W. Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
 Author: Matthew Turk <matthewturk at gmail.com>
 Author: J. S. Oishi <jsoishi at gmail.com>
 Affiliation: KIPAC/SLAC/Stanford
@@ -35,38 +37,33 @@
     def _field_dict(self,fhandle):
         keys = fhandle['field_types'].keys()
         val = fhandle['field_types'].keys()
-        # ncomp = int(fhandle['/'].attrs['num_components'])
-        # temp =  fhandle['/'].attrs.listitems()[-ncomp:]
-        # val, keys = zip(*temp)
-        # val = [int(re.match('component_(\d+)',v).groups()[0]) for v in val]
         return dict(zip(keys,val))
         
     def _read_field_names(self,grid):
         fhandle = h5py.File(grid.filename,'r')
-        return fhandle['field_types'].keys()
+        names = fhandle['field_types'].keys()
+        fhandle.close()
+        return names
     
     def _read_data_set(self,grid,field):
         fhandle = h5py.File(grid.hierarchy.hierarchy_filename,'r')
-        return fhandle['/data/grid_%010i/'%grid.id+field][:]
-        # field_dict = self._field_dict(fhandle)
-        # lstring = 'level_%i' % grid.Level
-        # lev = fhandle[lstring]
-        # dims = grid.ActiveDimensions
-        # boxsize = dims.prod()
-        
-        # grid_offset = lev[self._offset_string][grid._level_id]
-        # start = grid_offset+field_dict[field]*boxsize
-        # stop = start + boxsize
-        # data = lev[self._data_string][start:stop]
-
-        # return data.reshape(dims, order='F')
-                                          
+        data = (fhandle['/data/grid_%010i/'%grid.id+field][:]).copy()
+        fhandle.close()
+        if grid.pf.field_ordering == 1:
+            return data.T
+        else:
+            return data
 
     def _read_data_slice(self, grid, field, axis, coord):
         sl = [slice(None), slice(None), slice(None)]
         sl[axis] = slice(coord, coord + 1)
+        if grid.pf.field_ordering == 1:
+            sl.reverse()
         fhandle = h5py.File(grid.hierarchy.hierarchy_filename,'r')
-        return fhandle['/data/grid_%010i/'%grid.id+field][:][sl]
+        data = (fhandle['/data/grid_%010i/'%grid.id+field][:][sl]).copy()
+        fhandle.close()
+        if grid.pf.field_ordering == 1:
+            return data.T
+        else:
+            return data
 
-    # return self._read_data_set(grid,field)[sl]
-
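
The field_ordering == 1 branches above compensate for Fortran-ordered (column-major) data coming back from h5py with its axes reversed, which is why a transpose restores the expected (x, y, z) layout.  A standalone illustration of the effect, not tied to any particular GDF file:

    import numpy as na

    c_data = na.arange(24).reshape(2, 3, 4)        # (nx, ny, nz) in C order
    # Simulate reading the same bytes back in the opposite ordering:
    as_read = c_data.ravel(order='F').reshape(4, 3, 2)
    print na.all(as_read.T == c_data)              # -> True: transpose fixes it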


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -24,42 +24,15 @@
 """
 
 import time, types, signal, inspect, traceback, sys, pdb, os
-import warnings, struct
+import warnings, struct, subprocess
 from math import floor, ceil
 
 from yt.utilities.exceptions import *
 from yt.utilities.logger import ytLogger as mylog
 import yt.utilities.progressbar as pb
 import yt.utilities.rpdb as rpdb
-
-# Some compatibility functions.  In the long run, these *should* disappear as
-# we move toward newer python versions.  Most were implemented to get things
-# running on DataStar.
-
-# If we're running on python2.4, we need a 'wraps' function
-def blank_wrapper(f):
-    return lambda a: a
-
-try:
-    from functools import wraps
-except ImportError:
-    wraps = blank_wrapper
-
-# We need to ensure that we have a defaultdict implementation
-
-class __defaultdict(dict):
-    def __init__(self, func):
-        self.__func = func
-        dict.__init__(self)
-    def __getitem__(self, key):
-        if not self.has_key(key):
-            self.__setitem__(key, self.__func())
-        return dict.__getitem__(self, key)
-
-try:
-    from collections import defaultdict
-except ImportError:
-    defaultdict = __defaultdict
+from collections import defaultdict
+from functools import wraps
 
 # Some functions for handling sequences and other types
 
@@ -78,7 +51,7 @@
     string to a list, for instance ensuring the *fields* as an argument is a
     list.
     """
-    if obj == None:
+    if obj is None:
         return [obj]
     if not isinstance(obj, types.ListType):
         return [obj]
@@ -385,18 +358,6 @@
 def signal_ipython(signo, frame):
     insert_ipython(2)
 
-# We use two signals, SIGUSR1 and SIGUSR2.  In a non-threaded environment,
-# we set up handlers to process these by printing the current stack and to
-# raise a RuntimeError.  The latter can be used, inside pdb, to catch an error
-# and then examine the current stack.
-try:
-    signal.signal(signal.SIGUSR1, signal_print_traceback)
-    mylog.debug("SIGUSR1 registered for traceback printing")
-    signal.signal(signal.SIGUSR2, signal_ipython)
-    mylog.debug("SIGUSR2 registered for IPython Insertion")
-except ValueError:  # Not in main thread
-    pass
-
 def paste_traceback(exc_type, exc, tb):
     """
     This is a traceback handler that knows how to paste to the pastebin.
@@ -450,29 +411,6 @@
     dec_s = ''.join([ chr(ord(a) ^ ord(b)) for a, b in zip(enc_s, itertools.cycle(key)) ])
     print dec_s
 
-# If we recognize one of the arguments on the command line as indicating a
-# different mechanism for handling tracebacks, we attach one of those handlers
-# and remove the argument from sys.argv.
-#
-# This fallback is for Paraview:
-if not hasattr(sys, 'argv') or sys.argv is None: sys.argv = []
-# Now, we check.
-if "--paste" in sys.argv:
-    sys.excepthook = paste_traceback
-    del sys.argv[sys.argv.index("--paste")]
-elif "--paste-detailed" in sys.argv:
-    sys.excepthook = paste_traceback_detailed
-    del sys.argv[sys.argv.index("--paste-detailed")]
-elif "--detailed" in sys.argv:
-    import cgitb; cgitb.enable(format="text")
-    del sys.argv[sys.argv.index("--detailed")]
-elif "--rpdb" in sys.argv:
-    sys.excepthook = rpdb.rpdb_excepthook
-    del sys.argv[sys.argv.index("--rpdb")]
-elif "--detailed" in sys.argv:
-    import cgitb; cgitb.enable(format="text")
-    del sys.argv[sys.argv.index("--detailed")]
-
 #
 # Some exceptions
 #
@@ -482,3 +420,103 @@
 
 class YTEmptyClass(object):
     pass
+
+def update_hg(path, skip_rebuild = False):
+    from mercurial import hg, ui, commands
+    f = open(os.path.join(path, "yt_updater.log"), "a")
+    u = ui.ui()
+    u.pushbuffer()
+    config_fn = os.path.join(path, ".hg", "hgrc")
+    print "Reading configuration from ", config_fn
+    u.readconfig(config_fn)
+    repo = hg.repository(u, path)
+    commands.pull(u, repo)
+    f.write(u.popbuffer())
+    f.write("\n\n")
+    u.pushbuffer()
+    commands.identify(u, repo)
+    if "+" in u.popbuffer():
+        print "Can't rebuild modules by myself."
+        print "You will have to do this yourself.  Here's a sample commands:"
+        print
+        print "    $ cd %s" % (path)
+        print "    $ hg up"
+        print "    $ %s setup.py develop" % (sys.executable)
+        return 1
+    print "Updating the repository"
+    f.write("Updating the repository\n\n")
+    commands.update(u, repo, check=True)
+    if skip_rebuild: return
+    f.write("Rebuilding modules\n\n")
+    p = subprocess.Popen([sys.executable, "setup.py", "build_ext", "-i"], cwd=path,
+                        stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
+    stdout, stderr = p.communicate()
+    f.write(stdout)
+    f.write("\n\n")
+    if p.returncode:
+        print "BROKEN: See %s" % (os.path.join(path, "yt_updater.log"))
+        sys.exit(1)
+    f.write("Successful!\n")
+    print "Updated successfully."
+
+def get_hg_version(path):
+    from mercurial import hg, ui, commands
+    u = ui.ui()
+    u.pushbuffer()
+    repo = hg.repository(u, path)
+    commands.identify(u, repo)
+    return u.popbuffer()
+
+def get_yt_version():
+    import pkg_resources
+    yt_provider = pkg_resources.get_provider("yt")
+    path = os.path.dirname(yt_provider.module_path)
+    version = get_hg_version(path)[:12]
+    return version
+
+# This code snippet is modified from Georg Brandl
+def bb_apicall(endpoint, data, use_pass = True):
+    import urllib, urllib2, getpass, base64
+    uri = 'https://api.bitbucket.org/1.0/%s/' % endpoint
+    # since bitbucket doesn't return the required WWW-Authenticate header when
+    # making a request without Authorization, we cannot use the standard urllib2
+    # auth handlers; we have to add the requisite header from the start
+    if data is not None:
+        data = urllib.urlencode(data)
+    req = urllib2.Request(uri, data)
+    if use_pass:
+        username = raw_input("Bitbucket Username? ")
+        password = getpass.getpass()
+        upw = '%s:%s' % (username, password)
+        req.add_header('Authorization', 'Basic %s' % base64.b64encode(upw).strip())
+    return urllib2.urlopen(req).read()
+
+def get_yt_supp():
+    supp_path = os.path.join(os.environ["YT_DEST"], "src",
+                             "yt-supplemental")
+    # Now we check that the supplemental repository is checked out.
+    if not os.path.isdir(supp_path):
+        print
+        print "*** The yt-supplemental repository is not checked ***"
+        print "*** out.  I can do this for you, but because this ***"
+        print "*** is a delicate act, I require you to respond   ***"
+        print "*** to the prompt with the word 'yes'.            ***"
+        print
+        response = raw_input("Do you want me to try to check it out? ")
+        if response != "yes":
+            print
+            print "Okay, I understand.  You can check it out yourself."
+            print "This command will do it:"
+            print
+            print "$ hg clone http://hg.yt-project.org/yt-supplemental/ ",
+            print "%s" % (supp_path)
+            print
+            sys.exit(1)
+        from mercurial import hg, ui, commands
+        uu = ui.ui()
+        rv = commands.clone(uu,
+                "http://hg.yt-project.org/yt-supplemental/", supp_path)
+        if rv:
+            print "Something has gone wrong.  Quitting."
+            sys.exit(1)
+    # Now we think we have our supplemental repository.
+    return supp_path
+
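
The updater helpers above are normally driven by the command-line machinery, but they can also be called directly.  A minimal sketch, where the checkout path is a placeholder:

    from yt.funcs import get_hg_version, update_hg

    yt_path = "/path/to/yt-hg"                    # placeholder checkout location
    print "Currently at:", get_hg_version(yt_path)
    update_hg(yt_path, skip_rebuild=True)         # pull + update, skip rebuilding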


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/gui/reason/html/images/3d.png
Binary file yt/gui/reason/html/images/3d.png has changed


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/gui/reason/html/images/3d_tab.png
Binary file yt/gui/reason/html/images/3d_tab.png has changed


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/gui/reason/html/images/binary.png
Binary file yt/gui/reason/html/images/binary.png has changed


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/gui/reason/html/images/blockdevice.png
Binary file yt/gui/reason/html/images/blockdevice.png has changed


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/gui/reason/html/images/blockdevice_tab.png
Binary file yt/gui/reason/html/images/blockdevice_tab.png has changed


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/gui/reason/html/images/console.png
Binary file yt/gui/reason/html/images/console.png has changed


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/gui/reason/html/images/double_down.png
Binary file yt/gui/reason/html/images/double_down.png has changed


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/gui/reason/html/images/double_down_sm.png
Binary file yt/gui/reason/html/images/double_down_sm.png has changed


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/gui/reason/html/images/double_left.png
Binary file yt/gui/reason/html/images/double_left.png has changed


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/gui/reason/html/images/double_left_sm.png
Binary file yt/gui/reason/html/images/double_left_sm.png has changed


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/gui/reason/html/images/double_right.png
Binary file yt/gui/reason/html/images/double_right.png has changed


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/gui/reason/html/images/double_right_sm.png
Binary file yt/gui/reason/html/images/double_right_sm.png has changed


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/gui/reason/html/images/double_up.png
Binary file yt/gui/reason/html/images/double_up.png has changed


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/gui/reason/html/images/double_up_sm.png
Binary file yt/gui/reason/html/images/double_up_sm.png has changed


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/gui/reason/html/images/graph.png
Binary file yt/gui/reason/html/images/graph.png has changed


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/gui/reason/html/images/kivio_flw.png
Binary file yt/gui/reason/html/images/kivio_flw.png has changed


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/gui/reason/html/images/single_down.png
Binary file yt/gui/reason/html/images/single_down.png has changed


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/gui/reason/html/images/single_down_sm.png
Binary file yt/gui/reason/html/images/single_down_sm.png has changed


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/gui/reason/html/images/single_left.png
Binary file yt/gui/reason/html/images/single_left.png has changed


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/gui/reason/html/images/single_left_sm.png
Binary file yt/gui/reason/html/images/single_left_sm.png has changed


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/gui/reason/html/images/single_right.png
Binary file yt/gui/reason/html/images/single_right.png has changed


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/gui/reason/html/images/single_right_sm.png
Binary file yt/gui/reason/html/images/single_right_sm.png has changed


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/gui/reason/html/images/single_up.png
Binary file yt/gui/reason/html/images/single_up.png has changed


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/gui/reason/html/images/single_up_sm.png
Binary file yt/gui/reason/html/images/single_up_sm.png has changed


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/gui/reason/html/images/upload.png
Binary file yt/gui/reason/html/images/upload.png has changed


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/gui/reason/html/leaflet/images/marker-shadow.png
Binary file yt/gui/reason/html/leaflet/images/marker-shadow.png has changed


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/gui/reason/html/leaflet/images/marker.png
Binary file yt/gui/reason/html/leaflet/images/marker.png has changed


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/gui/reason/html/leaflet/images/popup-close.png
Binary file yt/gui/reason/html/leaflet/images/popup-close.png has changed


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/gui/reason/html/leaflet/images/zoom-in.png
Binary file yt/gui/reason/html/leaflet/images/zoom-in.png has changed


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/gui/reason/html/leaflet/images/zoom-out.png
Binary file yt/gui/reason/html/leaflet/images/zoom-out.png has changed


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -35,6 +35,13 @@
 import numpy as na # For historical reasons
 import numpy # In case anyone wishes to use it by name
 
+# This next item will handle most of the actual startup procedures, but it will
+# also attempt to parse the command line and set up the global state of various
+# operations.
+
+import yt.startup_tasks as __startup_tasks
+unparsed_args = __startup_tasks.unparsed_args
+
 from yt.funcs import *
 from yt.utilities.logger import ytLogger as mylog
 from yt.utilities.performance_counters import yt_counters, time_function
@@ -108,7 +115,7 @@
     PlotCollection, PlotCollectionInteractive, \
     get_multi_plot, FixedResolutionBuffer, ObliqueFixedResolutionBuffer, \
     callback_registry, write_bitmap, write_image, annotate_image, \
-    apply_colormap, scale_image
+    apply_colormap, scale_image, write_projection
 
 from yt.visualization.volume_rendering.api import \
     ColorTransferFunction, PlanckTransferFunction, ProjectionTransferFunction, \
@@ -122,6 +129,10 @@
 
 from yt.convenience import all_pfs, max_spheres, load, projload
 
+# Import some helpful math utilities
+from yt.utilities.math_utils import \
+    ortho_find, quartiles
+
 
 # We load plugins.  Keep in mind, this can be fairly dangerous -
 # the primary purpose is to allow people to have a set of functions

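The import of yt.startup_tasks above means that bringing in yt.mods now parses yt's own command-line flags as a side effect and leaves whatever it does not recognize in unparsed_args. A minimal sketch of a user script that picks up its own options afterwards; the --axis flag here is purely illustrative:

    import argparse
    from yt.mods import unparsed_args

    parser = argparse.ArgumentParser(description="my analysis script")
    parser.add_argument("parameter_file")
    parser.add_argument("--axis", type=int, default=0)  # illustrative option
    # parse only the arguments yt's own parser left behind
    args = parser.parse_args(unparsed_args)
    print "Analyzing %s along axis %d" % (args.parameter_file, args.axis)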

diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/startup_tasks.py
--- /dev/null
+++ b/yt/startup_tasks.py
@@ -0,0 +1,144 @@
+"""
+Very simple convenience function for importing all the modules, setting up
+the namespace and getting the last argument on the command line.
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+# This handles the command line.
+
+import argparse, os, sys
+
+from yt.config import ytcfg
+from yt.funcs import *
+
+exe_name = os.path.basename(sys.executable)
+# At import time, we determined whether or not we're being run in parallel.
+def turn_on_parallelism():
+    try:
+        from mpi4py import MPI
+        parallel_capable = (MPI.COMM_WORLD.size > 1)
+    except ImportError:
+        parallel_capable = False
+    if parallel_capable:
+        mylog.info("Global parallel computation enabled: %s / %s",
+                   MPI.COMM_WORLD.rank, MPI.COMM_WORLD.size)
+        ytcfg["yt","__global_parallel_rank"] = str(MPI.COMM_WORLD.rank)
+        ytcfg["yt","__global_parallel_size"] = str(MPI.COMM_WORLD.size)
+        ytcfg["yt","__parallel"] = "True"
+        if exe_name == "embed_enzo" or \
+            ("_parallel" in dir(sys) and sys._parallel == True):
+            ytcfg["yt","inline"] = "True"
+        # I believe we do not need to turn this off manually
+        #ytcfg["yt","StoreParameterFiles"] = "False"
+        # Now let's make sure we have the right options set.
+        if MPI.COMM_WORLD.rank > 0:
+            if ytcfg.getboolean("yt","LogFile"):
+                ytcfg["yt","LogFile"] = "False"
+                yt.utilities.logger.disable_file_logging()
+    return parallel_capable
+
+# This fallback is for Paraview:
+
+# We use two signals, SIGUSR1 and SIGUSR2.  In a non-threaded environment,
+# we set up handlers to process these by printing the current stack and to
+# raise a RuntimeError.  The latter can be used, inside pdb, to catch an error
+# and then examine the current stack.
+try:
+    signal.signal(signal.SIGUSR1, signal_print_traceback)
+    mylog.debug("SIGUSR1 registered for traceback printing")
+    signal.signal(signal.SIGUSR2, signal_ipython)
+    mylog.debug("SIGUSR2 registered for IPython Insertion")
+except ValueError:  # Not in main thread
+    pass
+
+class SetExceptionHandling(argparse.Action):
+    def __call__(self, parser, namespace, values, option_string = None):
+        # If we recognize one of the arguments on the command line as indicating a
+        # different mechanism for handling tracebacks, we attach one of those handlers
+        # and remove the argument from sys.argv.
+        #
+        if self.dest == "paste":
+            sys.excepthook = paste_traceback
+            mylog.debug("Enabling traceback pasting")
+        elif self.dest == "paste-detailed":
+            sys.excepthook = paste_traceback_detailed
+            mylog.debug("Enabling detailed traceback pasting")
+        elif self.dest == "detailed":
+            import cgitb; cgitb.enable(format="text")
+            mylog.debug("Enabling detailed traceback reporting")
+        elif self.dest == "rpdb":
+            sys.excepthook = rpdb.rpdb_excepthook
+            mylog.debug("Enabling remote debugging")
+
+class SetConfigOption(argparse.Action):
+    def __call__(self, parser, namespace, values, option_string = None):
+        param, val = values.split("=")
+        mylog.debug("Overriding config: %s = %s", param, val)
+        ytcfg["yt",param] = val
+
+parser = argparse.ArgumentParser(description = 'yt command line arguments')
+parser.add_argument("--config", action=SetConfigOption,
+    help = "Set configuration option, in the form param=value")
+parser.add_argument("--paste", action=SetExceptionHandling,
+    help = "Paste traceback to paste.yt-project.org", nargs = 0)
+parser.add_argument("--paste-detailed", action=SetExceptionHandling,
+    help = "Paste a detailed traceback with local variables to " +
+           "paste.yt-project.org", nargs = 0)
+parser.add_argument("--detailed", action=SetExceptionHandling,
+    help = "Display detailed traceback.", nargs = 0)
+parser.add_argument("--rpdb", action=SetExceptionHandling,
+    help = "Enable remote pdb interaction (for parallel debugging).", nargs = 0)
+parser.add_argument("--parallel", action="store_true", default=False,
+    dest = "parallel",
+    help = "Run in MPI-parallel mode (must be launched as an MPI task)")
+if not hasattr(sys, 'argv') or sys.argv is None: sys.argv = []
+
+unparsed_args = []
+
+parallel_capable = False
+if not ytcfg.getboolean("yt","__command_line"):
+    opts, unparsed_args = parser.parse_known_args()
+    # THIS IS NOT SUCH A GOOD IDEA:
+    #sys.argv = [a for a in unparsed_args]
+    if opts.parallel:
+        parallel_capable = turn_on_parallelism()
+else:
+    subparsers = parser.add_subparsers(title="subcommands",
+                        dest='subcommands',
+                        description="Valid subcommands",)
+    def print_help(*args, **kwargs):
+        parser.print_help()
+    help_parser = subparsers.add_parser("help", help="Print help message")
+    help_parser.set_defaults(func=print_help)
+
+
+if parallel_capable == True:
+    pass
+elif exe_name in \
+        ["mpi4py", "embed_enzo",
+         "python"+sys.version[:3]+"-mpi"] \
+    or '_parallel' in dir(sys) \
+    or any(["ipengine" in arg for arg in sys.argv]):
+    parallel_capable = turn_on_parallelism()
+else:
+    parallel_capable = False

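The key mechanism in startup_tasks.py is argparse's parse_known_args, which consumes only the flags the yt parser defines (--config, --paste, --parallel, and so on) and hands everything else back, so downstream scripts keep their own argument handling; the nargs=0 Action subclasses simply install an exception hook when their flag is present. A self-contained sketch of the parse_known_args split, with a made-up --output flag standing in for a script-level option:

    import argparse

    yt_parser = argparse.ArgumentParser(description="yt command line arguments")
    yt_parser.add_argument("--parallel", action="store_true", default=False)
    yt_parser.add_argument("--config", default=None)

    argv = ["--parallel", "--output", "image.png", "run42"]
    opts, unparsed = yt_parser.parse_known_args(argv)
    print opts.parallel   # True: consumed by the yt-side parser
    print unparsed        # ['--output', 'image.png', 'run42']: left for the script

For the signal handlers registered above, sending SIGUSR1 to a running yt process (kill -USR1 <pid>) prints the current stack and SIGUSR2 drops into IPython, provided the handlers could be installed in the main thread.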

diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/utilities/_amr_utils/VolumeIntegrator.pyx
--- a/yt/utilities/_amr_utils/VolumeIntegrator.pyx
+++ b/yt/utilities/_amr_utils/VolumeIntegrator.pyx
@@ -65,6 +65,9 @@
     double log2(double x)
     long int lrint(double x)
     double fabs(double x)
+    double cos(double x)
+    double sin(double x)
+    double asin(double x)
 
 cdef struct Triangle:
     Triangle *next
@@ -238,6 +241,33 @@
         tr[i] = ipnest
     return tr
 
+def arr_fisheye_vectors(int resolution, np.float64_t fov):
+    # We now follow figures 4-7 of:
+    # http://paulbourke.net/miscellaneous/domefisheye/fisheye/
+    # ...but all in Cython.
+    cdef np.ndarray[np.float64_t, ndim=3] vp
+    cdef int i, j, k
+    cdef np.float64_t r, phi, theta, px, py
+    cdef np.float64_t pi = 3.1415926
+    cdef np.float64_t fov_rad = fov * pi / 180.0
+    vp = np.zeros((resolution, resolution, 3), dtype="float64")
+    for i in range(resolution):
+        px = 2.0 * i / (resolution) - 1.0
+        for j in range(resolution):
+            py = 2.0 * j / (resolution) - 1.0
+            r = (px*px + py*py)**0.5
+            if r == 0.0:
+                phi = 0.0
+            elif px < 0:
+                phi = pi - asin(py / r)
+            else:
+                phi = asin(py / r)
+            theta = r * fov_rad / 2.0
+            vp[i,j,0] = sin(theta) * cos(phi)
+            vp[i,j,1] = sin(theta) * sin(phi)
+            vp[i,j,2] = cos(theta)
+    return vp
+
 cdef class star_kdtree_container:
     cdef kdtree_utils.kdtree *tree
     cdef public np.float64_t sigma

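arr_fisheye_vectors maps each pixel of a square image to a unit ray direction for a fisheye (dome) projection: pixel coordinates are rescaled to [-1, 1], the radius r and azimuthal angle phi are taken in the image plane, theta = r * fov / 2, and (theta, phi) is converted to a Cartesian direction. A vectorized NumPy sketch of the same mapping, useful for sanity-checking the Cython loop rather than replacing it; arctan2 reproduces the sin/cos of the asin-based branches above:

    import numpy as np

    def fisheye_vectors_np(resolution, fov):
        # fov in degrees; returns an array of shape (resolution, resolution, 3)
        fov_rad = fov * np.pi / 180.0
        coords = 2.0 * np.arange(resolution) / resolution - 1.0
        px = coords[:, np.newaxis]
        py = coords[np.newaxis, :]
        r = np.sqrt(px * px + py * py)
        phi = np.arctan2(py, px)          # same sin/cos as the branched asin form
        theta = r * fov_rad / 2.0
        vp = np.empty((resolution, resolution, 3), dtype="float64")
        vp[..., 0] = np.sin(theta) * np.cos(phi)
        vp[..., 1] = np.sin(theta) * np.sin(phi)
        vp[..., 2] = np.cos(theta)
        return vp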

diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/utilities/_amr_utils/grid_traversal.pyx
--- a/yt/utilities/_amr_utils/grid_traversal.pyx
+++ b/yt/utilities/_amr_utils/grid_traversal.pyx
@@ -91,6 +91,7 @@
                   np.ndarray[np.int64_t, ndim=1] dims):
         # The data is likely brought in via a slice, so we copy it
         cdef np.ndarray[np.float64_t, ndim=3] tdata
+        self.container = NULL
         self.parent_grid_id = parent_grid_id
         self.LeftEdge = left_edge
         self.RightEdge = right_edge
@@ -114,7 +115,8 @@
     def __dealloc__(self):
         # The data fields are not owned by the container, they are owned by us!
         # So we don't need to deallocate them.
-        free(self.container.data)
+        if self.container == NULL: return
+        if self.container.data != NULL: free(self.container.data)
         free(self.container)
 
 cdef struct ImageContainer:
@@ -259,42 +261,41 @@
         cdef np.float64_t width[3] 
         for i in range(3):
             width[i] = self.width[i]
-        #print iter[0], iter[1], iter[2], iter[3], width[0], width[1], width[2]
-        idata = <ImageAccumulator *> malloc(sizeof(ImageAccumulator))
-        idata.supp_data = self.supp_data
-        v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-        v_dir = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-        if im.vd_strides[0] == -1:
-            for j in range(size):
-                vj = j % ny
-                vi = (j - vj) / ny + iter[0]
-                vj = vj + iter[2]
-                # Dynamically calculate the position
-                px = width[0] * (<np.float64_t>vi)/(<np.float64_t>im.nv[0]-1) - width[0]/2.0
-                py = width[1] * (<np.float64_t>vj)/(<np.float64_t>im.nv[1]-1) - width[1]/2.0
-                v_pos[0] = im.vp_pos[0]*px + im.vp_pos[3]*py + im.vp_pos[9]
-                v_pos[1] = im.vp_pos[1]*px + im.vp_pos[4]*py + im.vp_pos[10]
-                v_pos[2] = im.vp_pos[2]*px + im.vp_pos[5]*py + im.vp_pos[11]
-                offset = im.im_strides[0] * vi + im.im_strides[1] * vj
-                for i in range(3): idata.rgba[i] = im.image[i + offset]
-                walk_volume(vc, v_pos, im.vp_dir, self.sampler,
-                            (<void *> idata))
-                for i in range(3): im.image[i + offset] = idata.rgba[i]
-        else:
-            # If we do not have a simple image plane, we have to cast all
-            # our rays 
-            for j in range(size):
-                offset = j * 3
-                for i in range(3): v_pos[i] = im.vp_pos[i + offset]
-                for i in range(3): v_dir[i] = im.vp_dir[i + offset]
-                for i in range(3): idata.rgba[i] = im.image[i + offset]
-                walk_volume(vc, v_pos, v_dir, self.sampler, 
-                            (<void *> idata))
-                for i in range(3):  im.image[i + offset] = idata.rgba[i]
-        free(v_dir)
-        free(idata)
-        free(v_pos)
-        #print self.aimage.max()
+        with nogil, parallel():
+            idata = <ImageAccumulator *> malloc(sizeof(ImageAccumulator))
+            idata.supp_data = self.supp_data
+            v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+            v_dir = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+            if im.vd_strides[0] == -1:
+                for j in prange(size, schedule="dynamic"):
+                    vj = j % ny
+                    vi = (j - vj) / ny + iter[0]
+                    vj = vj + iter[2]
+                    # Dynamically calculate the position
+                    px = width[0] * (<np.float64_t>vi)/(<np.float64_t>im.nv[0]-1) - width[0]/2.0
+                    py = width[1] * (<np.float64_t>vj)/(<np.float64_t>im.nv[1]-1) - width[1]/2.0
+                    v_pos[0] = im.vp_pos[0]*px + im.vp_pos[3]*py + im.vp_pos[9]
+                    v_pos[1] = im.vp_pos[1]*px + im.vp_pos[4]*py + im.vp_pos[10]
+                    v_pos[2] = im.vp_pos[2]*px + im.vp_pos[5]*py + im.vp_pos[11]
+                    offset = im.im_strides[0] * vi + im.im_strides[1] * vj
+                    for i in range(3): idata.rgba[i] = im.image[i + offset]
+                    walk_volume(vc, v_pos, im.vp_dir, self.sampler,
+                                (<void *> idata))
+                    for i in range(3): im.image[i + offset] = idata.rgba[i]
+            else:
+                # If we do not have a simple image plane, we have to cast all
+                # our rays 
+                for j in prange(size, schedule="dynamic"):
+                    offset = j * 3
+                    for i in range(3): v_pos[i] = im.vp_pos[i + offset]
+                    for i in range(3): v_dir[i] = im.vp_dir[i + offset]
+                    for i in range(3): idata.rgba[i] = im.image[i + offset]
+                    walk_volume(vc, v_pos, v_dir, self.sampler, 
+                                (<void *> idata))
+                    for i in range(3): im.image[i + offset] = idata.rgba[i]
+            free(v_dir)
+            free(idata)
+            free(v_pos)
         return hit
 
 cdef void projection_sampler(
@@ -753,7 +754,7 @@
                      sample_function *sampler,
                      void *data,
                      np.float64_t *return_t = NULL,
-                     np.float64_t enter_t = -1.0):
+                     np.float64_t enter_t = -1.0) nogil:
     cdef int cur_ind[3], step[3], x, y, i, n, flat_ind, hit, direction
     cdef np.float64_t intersect_t = 1.1
     cdef np.float64_t iv_dir[3]

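The rewrite above moves the per-pixel ray casting into an OpenMP region: the loop body sits inside with nogil, parallel(), each thread mallocs its own accumulator and position/direction scratch space, pixels are distributed with prange(..., schedule="dynamic"), and walk_volume is declared nogil so it can be called without holding the GIL (the misc_utilities.pyx hunk below applies the same with-nogil wrapper to a serial loop). A stripped-down Cython sketch of that pattern, assuming the extension is compiled with OpenMP support; process_pixel is a stand-in for walk_volume, not yt's sampler:

    import numpy as np
    cimport numpy as np
    cimport cython
    from cython.parallel import parallel, prange
    from libc.stdlib cimport malloc, free

    cdef double process_pixel(double x) nogil:
        # stand-in for walk_volume(): pure C work callable without the GIL
        return x * x

    @cython.boundscheck(False)
    @cython.wraparound(False)
    def sample_image(np.ndarray[np.float64_t, ndim=1] image):
        cdef int j
        cdef int size = image.shape[0]
        cdef double *scratch
        with nogil, parallel():
            # one private scratch buffer per OpenMP thread
            scratch = <double *> malloc(sizeof(double))
            for j in prange(size, schedule="dynamic"):
                scratch[0] = image[j]
                image[j] = process_pixel(scratch[0])
            free(scratch)
        return image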

diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/utilities/_amr_utils/misc_utilities.pyx
--- a/yt/utilities/_amr_utils/misc_utilities.pyx
+++ b/yt/utilities/_amr_utils/misc_utilities.pyx
@@ -45,17 +45,18 @@
     cdef int i
     cdef np.float64_t mi = 1e100, ma = -1e100, v
     cdef int np = px.shape[0]
-    for i in range(np):
-        v = value[i]
-        if v < mi or v > ma:
-            if px[i] + pdx[i] < leftx: continue
-            if px[i] - pdx[i] > rightx: continue
-            if py[i] + pdy[i] < lefty: continue
-            if py[i] - pdy[i] > righty: continue
-            if pdx[i] < mindx or pdy[i] < mindx: continue
-            if maxdx > 0 and (pdx[i] > maxdx or pdy[i] > maxdx): continue
-            if v < mi: mi = v
-            if v > ma: ma = v
+    with nogil:
+        for i in range(np):
+            v = value[i]
+            if v < mi or v > ma:
+                if px[i] + pdx[i] < leftx: continue
+                if px[i] - pdx[i] > rightx: continue
+                if py[i] + pdy[i] < lefty: continue
+                if py[i] - pdy[i] > righty: continue
+                if pdx[i] < mindx or pdy[i] < mindx: continue
+                if maxdx > 0 and (pdx[i] > maxdx or pdy[i] > maxdx): continue
+                if v < mi: mi = v
+                if v > ma: ma = v
     return (mi, ma)
 
 @cython.boundscheck(False)


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/utilities/answer_testing/__init__.py
--- a/yt/utilities/answer_testing/__init__.py
+++ b/yt/utilities/answer_testing/__init__.py
@@ -24,7 +24,7 @@
 """
 
 import runner, output_tests
-from runner import RegressionTestRunner, run_main
+from runner import RegressionTestRunner
 
 from output_tests import RegressionTest, SingleOutputTest, \
     MultipleOutputTest, YTStaticOutputTest, create_test


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/utilities/answer_testing/api.py
--- a/yt/utilities/answer_testing/api.py
+++ b/yt/utilities/answer_testing/api.py
@@ -31,7 +31,6 @@
 from .runner import \
     RegressionTestRunner, \
     RegressionTestStorage, \
-    run_main, \
     clear_registry, \
     registry_entries
 


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/utilities/answer_testing/runner.py
--- a/yt/utilities/answer_testing/runner.py
+++ b/yt/utilities/answer_testing/runner.py
@@ -27,8 +27,8 @@
 import os, shelve, cPickle, sys, imp, tempfile
 
 from yt.config import ytcfg; ytcfg["yt","serialize"] = "False"
-import yt.utilities.cmdln as cmdln
 from yt.funcs import *
+from yt.utilities.command_line import YTCommand
 from .xunit import Xunit
 
 from output_tests import test_registry, MultipleOutputTest, \
@@ -169,98 +169,22 @@
             print "Running '%s'" % (test_name)
             self.run_test(line.strip())
 
-class EnzoTestRunnerCommands(cmdln.Cmdln):
-    name = "enzo_tests"
+def _load_modules(test_modules):
+    for fn in test_modules:
+        if fn.endswith(".py"): fn = fn[:-3]
+        print "Loading module %s" % (fn)
+        mname = os.path.basename(fn)
+        f, filename, desc = imp.find_module(mname, [os.path.dirname(fn)])
+        project = imp.load_module(mname, f, filename, desc)
 
-    def _load_modules(self, test_modules):
-        for fn in test_modules:
-            if fn.endswith(".py"): fn = fn[:-3]
-            print "Loading module %s" % (fn)
-            mname = os.path.basename(fn)
-            f, filename, desc = imp.find_module(mname, [os.path.dirname(fn)])
-            project = imp.load_module(mname, f, filename, desc)
-
-    def _update_io_log(self, opts, kwargs):
-        if opts.datasets is None or len(opts.datasets) == 0: return
-        f = tempfile.NamedTemporaryFile()
-        kwargs['io_log'] = f.name
-        for d in opts.datasets:
-            fn = os.path.expanduser(d)
-            print "Registered dataset %s" % fn
-            f.write("DATASET WRITTEN %s\n" % fn)
-        f.flush()
-        f.seek(0)
-        return f
-
-    @cmdln.option("-f", "--dataset", action="append",
-                  help="override the io_log and add this to the new one",
-                  dest="datasets")
-    @cmdln.option("-p", "--results-path", action="store",
-                  help="which directory should results be stored in",
-                  dest="results_path", default=".")
-    def do_store(self, subcmd, opts, name, *test_modules):
-        """
-        ${cmd_name}: Run and store a new dataset.
-
-        ${cmd_usage}
-        ${cmd_option_list}
-        """
-        sys.path.insert(0, ".")
-        self._load_modules(test_modules)
-        kwargs = {}
-        f = self._update_io_log(opts, kwargs)
-        test_runner = RegressionTestRunner(name,
-                results_path = opts.results_path,
-                **kwargs)
-        test_runner.run_all_tests()
-
-    @cmdln.option("-o", "--output", action="store",
-                  help="output results to file",
-                  dest="outputfile", default=None)
-    @cmdln.option("-p", "--results-path", action="store",
-                  help="which directory should results be stored in",
-                  dest="results_path", default=".")
-    @cmdln.option("-n", "--nose", action="store_true",
-                  help="run through nose with xUnit testing",
-                  dest="run_nose", default=False)
-    @cmdln.option("-f", "--dataset", action="append",
-                  help="override the io_log and add this to the new one",
-                  dest="datasets")
-    def do_compare(self, subcmd, opts, reference, comparison, *test_modules):
-        """
-        ${cmd_name}: Compare a reference dataset against a new dataset.  The
-        new dataset will be run regardless of whether it exists or not.
-
-        ${cmd_usage}
-        ${cmd_option_list}
-        """
-        if comparison == "__CURRENT__":
-            import pkg_resources
-            yt_provider = pkg_resources.get_provider("yt")
-            path = os.path.dirname(yt_provider.module_path)
-            from yt.utilities.command_line import _get_hg_version
-            comparison = _get_hg_version(path)[:12]
-            print "Setting comparison to: %s" % (comparison)
-        sys.path.insert(0, ".")
-        self._load_modules(test_modules)
-        kwargs = {}
-        f = self._update_io_log(opts, kwargs)
-        test_runner = RegressionTestRunner(comparison, reference,
-                            results_path=opts.results_path,
-                            **kwargs)
-        if opts.run_nose:
-            test_runner.watcher = Xunit()
-        results = test_runner.run_all_tests()
-        if opts.run_nose:
-            test_runner.watcher.report()
-        if opts.outputfile is not None:
-            f = open(str(opts.outputfile), "w")
-            for testname, success in sorted(results.items()):
-                f.write("%s %s\n" % (testname.ljust(100), success))
-
-def run_main():
-    etrc = EnzoTestRunnerCommands()
-    sys.exit(etrc.main())
-
-if __name__ == "__main__":
-    run_main()
+def _update_io_log(opts, kwargs):
+    if opts.datasets is None or len(opts.datasets) == 0: return
+    f = tempfile.NamedTemporaryFile()
+    kwargs['io_log'] = f.name
+    for d in opts.datasets:
+        fn = os.path.expanduser(d)
+        print "Registered dataset %s" % fn
+        f.write("DATASET WRITTEN %s\n" % fn)
+    f.flush()
+    f.seek(0)
+    return f

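With the cmdln-based EnzoTestRunnerCommands removed, the module keeps only plain helpers; _load_modules imports test definitions from arbitrary file paths with the imp machinery, so that importing a module registers its tests as a side effect. A small sketch of that load-by-path idiom; the "./my_tests.py" path is hypothetical:

    import imp, os

    def load_module_by_path(fn):
        # mirrors _load_modules above: strip ".py", locate, and import by name
        if fn.endswith(".py"):
            fn = fn[:-3]
        mname = os.path.basename(fn)
        f, filename, desc = imp.find_module(mname, [os.path.dirname(fn)])
        try:
            return imp.load_module(mname, f, filename, desc)
        finally:
            if f is not None:
                f.close()

    # hypothetical usage: tests_module = load_module_by_path("./my_tests.py")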

diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/utilities/cmdln.py
--- a/yt/utilities/cmdln.py
+++ /dev/null
@@ -1,1586 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2002-2007 ActiveState Software Inc.
-# License: MIT (see LICENSE.txt for license details)
-# Author:  Trent Mick
-# Home:    http://trentm.com/projects/cmdln/
-
-"""An improvement on Python's standard cmd.py module.
-
-As with cmd.py, this module provides "a simple framework for writing
-line-oriented command intepreters."  This module provides a 'RawCmdln'
-class that fixes some design flaws in cmd.Cmd, making it more scalable
-and nicer to use for good 'cvs'- or 'svn'-style command line interfaces
-or simple shells.  And it provides a 'Cmdln' class that add
-optparse-based option processing. Basically you use it like this:
-
-    import cmdln
-
-    class MySVN(cmdln.Cmdln):
-        name = "svn"
-
-        @cmdln.alias('stat', 'st')
-        @cmdln.option('-v', '--verbose', action='store_true'
-                      help='print verbose information')
-        def do_status(self, subcmd, opts, *paths):
-            print "handle 'svn status' command"
-
-        #...
-
-    if __name__ == "__main__":
-        shell = MySVN()
-        retval = shell.main()
-        sys.exit(retval)
-
-See the README.txt or <http://trentm.com/projects/cmdln/> for more
-details.
-"""
-
-__version_info__ = (1, 1, 2)
-__version__ = '.'.join(map(str, __version_info__))
-
-import os
-import sys
-import re
-import cmd
-import optparse
-from pprint import pprint
-import sys
-
-
-
-
-#---- globals
-
-LOOP_ALWAYS, LOOP_NEVER, LOOP_IF_EMPTY = range(3)
-
-# An unspecified optional argument when None is a meaningful value.
-_NOT_SPECIFIED = ("Not", "Specified")
-
-# Pattern to match a TypeError message from a call that
-# failed because of incorrect number of arguments (see
-# Python/getargs.c).
-_INCORRECT_NUM_ARGS_RE = re.compile(
-    r"(takes [\w ]+ )(\d+)( arguments? \()(\d+)( given\))")
-
-
-
-#---- exceptions
-
-class CmdlnError(Exception):
-    """A cmdln.py usage error."""
-    def __init__(self, msg):
-        self.msg = msg
-    def __str__(self):
-        return self.msg
-
-class CmdlnUserError(Exception):
-    """An error by a user of a cmdln-based tool/shell."""
-    pass
-
-
-
-#---- public methods and classes
-
-def alias(*aliases):
-    """Decorator to add aliases for Cmdln.do_* command handlers.
-    
-    Example:
-        class MyShell(cmdln.Cmdln):
-            @cmdln.alias("!", "sh")
-            def do_shell(self, argv):
-                #...implement 'shell' command
-    """
-    def decorate(f):
-        if not hasattr(f, "aliases"):
-            f.aliases = []
-        f.aliases += aliases
-        return f
-    return decorate
-
-
-class RawCmdln(cmd.Cmd):
-    """An improved (on cmd.Cmd) framework for building multi-subcommand
-    scripts (think "svn" & "cvs") and simple shells (think "pdb" and
-    "gdb").
-
-    A simple example:
-
-        import cmdln
-
-        class MySVN(cmdln.RawCmdln):
-            name = "svn"
-
-            @cmdln.aliases('stat', 'st')
-            def do_status(self, argv):
-                print "handle 'svn status' command"
-
-        if __name__ == "__main__":
-            shell = MySVN()
-            retval = shell.main()
-            sys.exit(retval)
-
-    See <http://trentm.com/projects/cmdln> for more information.
-    """
-    name = None      # if unset, defaults basename(sys.argv[0])
-    prompt = None    # if unset, defaults to self.name+"> "
-    version = None   # if set, default top-level options include --version
-
-    # Default messages for some 'help' command error cases.
-    # They are interpolated with one arg: the command.
-    nohelp = "no help on '%s'"
-    unknowncmd = "unknown command: '%s'"
-
-    helpindent = '' # string with which to indent help output
-
-    def __init__(self, completekey='tab', 
-                 stdin=None, stdout=None, stderr=None):
-        """Cmdln(completekey='tab', stdin=None, stdout=None, stderr=None)
-
-        The optional argument 'completekey' is the readline name of a
-        completion key; it defaults to the Tab key. If completekey is
-        not None and the readline module is available, command completion
-        is done automatically.
-        
-        The optional arguments 'stdin', 'stdout' and 'stderr' specify
-        alternate input, output and error output file objects; if not
-        specified, sys.* are used.
-        
-        If 'stdout' but not 'stderr' is specified, stdout is used for
-        error output. This is to provide least surprise for users used
-        to only the 'stdin' and 'stdout' options with cmd.Cmd.
-        """
-        import sys
-        if self.name is None:
-            self.name = os.path.basename(sys.argv[0])
-        if self.prompt is None:
-            self.prompt = self.name+"> "
-        self._name_str = self._str(self.name)
-        self._prompt_str = self._str(self.prompt)
-        if stdin is not None:
-            self.stdin = stdin
-        else:
-            self.stdin = sys.stdin
-        if stdout is not None:
-            self.stdout = stdout
-        else:
-            self.stdout = sys.stdout
-        if stderr is not None:
-            self.stderr = stderr
-        elif stdout is not None:
-            self.stderr = stdout
-        else:
-            self.stderr = sys.stderr
-        self.cmdqueue = []
-        self.completekey = completekey
-        self.cmdlooping = False
-
-    def get_optparser(self):
-        """Hook for subclasses to set the option parser for the
-        top-level command/shell.
-
-        This option parser is used retrieved and used by `.main()' to
-        handle top-level options.
-
-        The default implements a single '-h|--help' option. Sub-classes
-        can return None to have no options at the top-level. Typically
-        an instance of CmdlnOptionParser should be returned.
-        """
-        version = (self.version is not None 
-                    and "%s %s" % (self._name_str, self.version)
-                    or None)
-        return CmdlnOptionParser(self, version=version)
-
-    def postoptparse(self):
-        """Hook method executed just after `.main()' parses top-level
-        options.
-
-        When called `self.options' holds the results of the option parse.
-        """
-        pass
-
-    def main(self, argv=None, loop=LOOP_NEVER):
-        """A possible mainline handler for a script, like so:
-
-            import cmdln
-            class MyCmd(cmdln.Cmdln):
-                name = "mycmd"
-                ...
-            
-            if __name__ == "__main__":
-                MyCmd().main()
-
-        By default this will use sys.argv to issue a single command to
-        'MyCmd', then exit. The 'loop' argument can be use to control
-        interactive shell behaviour.
-        
-        Arguments:
-            "argv" (optional, default sys.argv) is the command to run.
-                It must be a sequence, where the first element is the
-                command name and subsequent elements the args for that
-                command.
-            "loop" (optional, default LOOP_NEVER) is a constant
-                indicating if a command loop should be started (i.e. an
-                interactive shell). Valid values (constants on this module):
-                    LOOP_ALWAYS     start loop and run "argv", if any
-                    LOOP_NEVER      run "argv" (or .emptyline()) and exit
-                    LOOP_IF_EMPTY   run "argv", if given, and exit;
-                                    otherwise, start loop
-        """
-        if argv is None:
-            import sys
-            argv = sys.argv
-        else:
-            argv = argv[:] # don't modify caller's list
-
-        self.optparser = self.get_optparser()
-        if self.optparser: # i.e. optparser=None means don't process for opts
-            try:
-                self.options, args = self.optparser.parse_args(argv[1:])
-            except CmdlnUserError, ex:
-                msg = "%s: %s\nTry '%s help' for info.\n"\
-                      % (self.name, ex, self.name)
-                self.stderr.write(self._str(msg))
-                self.stderr.flush()
-                return 1
-            except StopOptionProcessing, ex:
-                return 0
-        else:
-            self.options, args = None, argv[1:]
-        self.postoptparse()
-
-        if loop == LOOP_ALWAYS:
-            if args:
-                self.cmdqueue.append(args)
-            return self.cmdloop()
-        elif loop == LOOP_NEVER:
-            if args:
-                return self.cmd(args)
-            else:
-                return self.emptyline()
-        elif loop == LOOP_IF_EMPTY:
-            if args:
-                return self.cmd(args)
-            else:
-                return self.cmdloop()
-
-    def cmd(self, argv):
-        """Run one command and exit.
-        
-            "argv" is the arglist for the command to run. argv[0] is the
-                command to run. If argv is an empty list then the
-                'emptyline' handler is run.
-
-        Returns the return value from the command handler.
-        """
-        assert isinstance(argv, (list, tuple)), \
-                "'argv' is not a sequence: %r" % argv
-        retval = None
-        try:
-            argv = self.precmd(argv)
-            retval = self.onecmd(argv)
-            self.postcmd(argv)
-        except:
-            if not self.cmdexc(argv):
-                raise
-            retval = 1
-        return retval
-
-    def _str(self, s):
-        """Safely convert the given str/unicode to a string for printing."""
-        try:
-            return str(s)
-        except UnicodeError:
-            #XXX What is the proper encoding to use here? 'utf-8' seems
-            #    to work better than "getdefaultencoding" (usually
-            #    'ascii'), on OS X at least.
-            #import sys
-            #return s.encode(sys.getdefaultencoding(), "replace")
-            return s.encode("utf-8", "replace")
-
-    def cmdloop(self, intro=None):
-        """Repeatedly issue a prompt, accept input, parse into an argv, and
-        dispatch (via .precmd(), .onecmd() and .postcmd()), passing them
-        the argv. In other words, start a shell.
-        
-            "intro" (optional) is a introductory message to print when
-                starting the command loop. This overrides the class
-                "intro" attribute, if any.
-        """
-        self.cmdlooping = True
-        self.preloop()
-        if self.use_rawinput and self.completekey:
-            try:
-                import readline
-                self.old_completer = readline.get_completer()
-                readline.set_completer(self.complete)
-                readline.parse_and_bind(self.completekey+": complete")
-            except ImportError:
-                pass
-        try:
-            if intro is None:
-                intro = self.intro
-            if intro:
-                intro_str = self._str(intro)
-                self.stdout.write(intro_str+'\n')
-            self.stop = False
-            retval = None
-            while not self.stop:
-                if self.cmdqueue:
-                    argv = self.cmdqueue.pop(0)
-                    assert isinstance(argv, (list, tuple)), \
-                            "item on 'cmdqueue' is not a sequence: %r" % argv
-                else:
-                    if self.use_rawinput:
-                        try:
-                            line = raw_input(self._prompt_str)
-                        except EOFError:
-                            line = 'EOF'
-                    else:
-                        self.stdout.write(self._prompt_str)
-                        self.stdout.flush()
-                        line = self.stdin.readline()
-                        if not len(line):
-                            line = 'EOF'
-                        else:
-                            line = line[:-1] # chop '\n'
-                    argv = line2argv(line)
-                try:
-                    argv = self.precmd(argv)
-                    retval = self.onecmd(argv)
-                    self.postcmd(argv)
-                except:
-                    if not self.cmdexc(argv):
-                        raise
-                    retval = 1
-                self.lastretval = retval
-            self.postloop()
-        finally:
-            if self.use_rawinput and self.completekey:
-                try:
-                    import readline
-                    readline.set_completer(self.old_completer)
-                except ImportError:
-                    pass
-        self.cmdlooping = False
-        return retval
-
-    def precmd(self, argv):
-        """Hook method executed just before the command argv is
-        interpreted, but after the input prompt is generated and issued.
-
-            "argv" is the cmd to run.
-            
-        Returns an argv to run (i.e. this method can modify the command
-        to run).
-        """
-        return argv
-
-    def postcmd(self, argv):
-        """Hook method executed just after a command dispatch is finished.
-        
-            "argv" is the command that was run.
-        """
-        pass
-
-    def cmdexc(self, argv):
-        """Called if an exception is raised in any of precmd(), onecmd(),
-        or postcmd(). If True is returned, the exception is deemed to have
-        been dealt with. Otherwise, the exception is re-raised.
-
-        The default implementation handles CmdlnUserError's, which
-        typically correspond to user error in calling commands (as
-        opposed to programmer error in the design of the script using
-        cmdln.py).
-        """
-        import sys
-        type, exc, traceback = sys.exc_info()
-        if isinstance(exc, CmdlnUserError):
-            msg = "%s %s: %s\nTry '%s help %s' for info.\n"\
-                  % (self.name, argv[0], exc, self.name, argv[0])
-            self.stderr.write(self._str(msg))
-            self.stderr.flush()
-            return True
-
-    def onecmd(self, argv):
-        if not argv:
-            return self.emptyline()
-        self.lastcmd = argv
-        cmdname = self._get_canonical_cmd_name(argv[0])
-        if cmdname:
-            handler = self._get_cmd_handler(cmdname)
-            if handler:
-                return self._dispatch_cmd(handler, argv)
-        return self.default(argv)
-
-    def _dispatch_cmd(self, handler, argv):
-        return handler(argv)
-
-    def default(self, argv):
-        """Hook called to handle a command for which there is no handler.
-
-            "argv" is the command and arguments to run.
-        
-        The default implementation writes and error message to stderr
-        and returns an error exit status.
-
-        Returns a numeric command exit status.
-        """
-        errmsg = self._str(self.unknowncmd % (argv[0],))
-        if self.cmdlooping:
-            self.stderr.write(errmsg+"\n")
-        else:
-            self.stderr.write("%s: %s\nTry '%s help' for info.\n"
-                              % (self._name_str, errmsg, self._name_str))
-        self.stderr.flush()
-        return 1
-
-    def parseline(self, line):
-        # This is used by Cmd.complete (readline completer function) to
-        # massage the current line buffer before completion processing.
-        # We override to drop special '!' handling.
-        line = line.strip()
-        if not line:
-            return None, None, line
-        elif line[0] == '?':
-            line = 'help ' + line[1:]
-        i, n = 0, len(line)
-        while i < n and line[i] in self.identchars: i = i+1
-        cmd, arg = line[:i], line[i:].strip()
-        return cmd, arg, line
-
-    def helpdefault(self, cmd, known):
-        """Hook called to handle help on a command for which there is no
-        help handler.
-
-            "cmd" is the command name on which help was requested.
-            "known" is a boolean indicating if this command is known
-                (i.e. if there is a handler for it).
-        
-        Returns a return code.
-        """
-        if known:
-            msg = self._str(self.nohelp % (cmd,))
-            if self.cmdlooping:
-                self.stderr.write(msg + '\n')
-            else:
-                self.stderr.write("%s: %s\n" % (self.name, msg))
-        else:
-            msg = self.unknowncmd % (cmd,)
-            if self.cmdlooping:
-                self.stderr.write(msg + '\n')
-            else:
-                self.stderr.write("%s: %s\n"
-                                  "Try '%s help' for info.\n"
-                                  % (self.name, msg, self.name))
-        self.stderr.flush()
-        return 1
-
-    def do_help(self, argv):
-        """${cmd_name}: give detailed help on a specific sub-command
-
-        Usage:
-            ${name} help [COMMAND]
-        """
-        if len(argv) > 1: # asking for help on a particular command
-            doc = None
-            cmdname = self._get_canonical_cmd_name(argv[1]) or argv[1]
-            if not cmdname:
-                return self.helpdefault(argv[1], False)
-            else:
-                helpfunc = getattr(self, "help_"+cmdname, None)
-                if helpfunc:
-                    doc = helpfunc()
-                else:
-                    handler = self._get_cmd_handler(cmdname)
-                    if handler:
-                        doc = handler.__doc__
-                    if doc is None:
-                        return self.helpdefault(argv[1], handler != None)
-        else: # bare "help" command
-            doc = self.__class__.__doc__  # try class docstring
-            if doc is None:
-                # Try to provide some reasonable useful default help.
-                if self.cmdlooping: prefix = ""
-                else:               prefix = self.name+' '
-                doc = """Usage:
-                    %sCOMMAND [ARGS...]
-                    %shelp [COMMAND]
-
-                ${option_list}
-                ${command_list}
-                ${help_list}
-                """ % (prefix, prefix)
-            cmdname = None
-
-        if doc: # *do* have help content, massage and print that
-            doc = self._help_reindent(doc)
-            doc = self._help_preprocess(doc, cmdname)
-            doc = doc.rstrip() + '\n' # trim down trailing space
-            self.stdout.write(self._str(doc))
-            self.stdout.flush()
-    do_help.aliases = ["?"]
-
-    def _help_reindent(self, help, indent=None):
-        """Hook to re-indent help strings before writing to stdout.
-
-            "help" is the help content to re-indent
-            "indent" is a string with which to indent each line of the
-                help content after normalizing. If unspecified or None
-                then the default is use: the 'self.helpindent' class
-                attribute. By default this is the empty string, i.e.
-                no indentation.
-
-        By default, all common leading whitespace is removed and then
-        the lot is indented by 'self.helpindent'. When calculating the
-        common leading whitespace the first line is ignored -- hence
-        help content for Conan can be written as follows and have the
-        expected indentation:
-
-            def do_crush(self, ...):
-                '''${cmd_name}: crush your enemies, see them driven before you...
-
-                c.f. Conan the Barbarian'''
-        """
-        if indent is None:
-            indent = self.helpindent
-        lines = help.splitlines(0)
-        _dedentlines(lines, skip_first_line=True)
-        lines = [(indent+line).rstrip() for line in lines]
-        return '\n'.join(lines)
-
-    def _help_preprocess(self, help, cmdname):
-        """Hook to preprocess a help string before writing to stdout.
-
-            "help" is the help string to process.
-            "cmdname" is the canonical sub-command name for which help
-                is being given, or None if the help is not specific to a
-                command.
-
-        By default the following template variables are interpolated in
-        help content. (Note: these are similar to Python 2.4's
-        string.Template interpolation but not quite.)
-
-        ${name}
-            The tool's/shell's name, i.e. 'self.name'.
-        ${option_list}
-            A formatted table of options for this shell/tool.
-        ${command_list}
-            A formatted table of available sub-commands.
-        ${help_list}
-            A formatted table of additional help topics (i.e. 'help_*'
-            methods with no matching 'do_*' method).
-        ${cmd_name}
-            The name (and aliases) for this sub-command formatted as:
-            "NAME (ALIAS1, ALIAS2, ...)".
-        ${cmd_usage}
-            A formatted usage block inferred from the command function
-            signature.
-        ${cmd_option_list}
-            A formatted table of options for this sub-command. (This is
-            only available for commands using the optparse integration,
-            i.e.  using @cmdln.option decorators or manually setting the
-            'optparser' attribute on the 'do_*' method.)
-
-        Returns the processed help. 
-        """
-        preprocessors = {
-            "${name}":            self._help_preprocess_name,
-            "${option_list}":     self._help_preprocess_option_list,
-            "${command_list}":    self._help_preprocess_command_list,
-            "${help_list}":       self._help_preprocess_help_list,
-            "${cmd_name}":        self._help_preprocess_cmd_name,
-            "${cmd_usage}":       self._help_preprocess_cmd_usage,
-            "${cmd_option_list}": self._help_preprocess_cmd_option_list,
-        }
-
-        for marker, preprocessor in preprocessors.items():
-            if marker in help:
-                help = preprocessor(help, cmdname)
-        return help
-
-    def _help_preprocess_name(self, help, cmdname=None):
-        return help.replace("${name}", self.name)
-
-    def _help_preprocess_option_list(self, help, cmdname=None):
-        marker = "${option_list}"
-        indent, indent_width = _get_indent(marker, help)
-        suffix = _get_trailing_whitespace(marker, help)
-
-        if self.optparser:
-            # Setup formatting options and format.
-            # - Indentation of 4 is better than optparse default of 2.
-            #   C.f. Damian Conway's discussion of this in Perl Best
-            #   Practices.
-            self.optparser.formatter.indent_increment = 4
-            self.optparser.formatter.current_indent = indent_width
-            block = self.optparser.format_option_help() + '\n'
-        else:
-            block = ""
-            
-        help = help.replace(indent+marker+suffix, block, 1)
-        return help
-
-
-    def _help_preprocess_command_list(self, help, cmdname=None):
-        marker = "${command_list}"
-        indent, indent_width = _get_indent(marker, help)
-        suffix = _get_trailing_whitespace(marker, help)
-
-        # Find any aliases for commands.
-        token2canonical = self._get_canonical_map()
-        aliases = {}
-        for token, cmdname in token2canonical.items():
-            if token == cmdname: continue
-            aliases.setdefault(cmdname, []).append(token)
-
-        # Get the list of (non-hidden) commands and their
-        # documentation, if any.
-        cmdnames = {} # use a dict to strip duplicates
-        for attr in self.get_names():
-            if attr.startswith("do_"):
-                cmdnames[attr[3:]] = True
-        cmdnames = cmdnames.keys()
-        cmdnames.sort()
-        linedata = []
-        for cmdname in cmdnames:
-            if aliases.get(cmdname):
-                a = aliases[cmdname]
-                a.sort()
-                cmdstr = "%s (%s)" % (cmdname, ", ".join(a))
-            else:
-                cmdstr = cmdname
-            doc = None
-            try:
-                helpfunc = getattr(self, 'help_'+cmdname)
-            except AttributeError:
-                handler = self._get_cmd_handler(cmdname)
-                if handler:
-                    doc = handler.__doc__
-            else:
-                doc = helpfunc()
-                
-            # Strip "${cmd_name}: " from the start of a command's doc. Best
-            # practice dictates that command help strings begin with this, but
-            # it isn't at all wanted for the command list.
-            to_strip = "${cmd_name}:"
-            if doc and doc.startswith(to_strip):
-                #log.debug("stripping %r from start of %s's help string",
-                #          to_strip, cmdname)
-                doc = doc[len(to_strip):].lstrip()
-            linedata.append( (cmdstr, doc) )
-
-        if linedata:
-            subindent = indent + ' '*4
-            lines = _format_linedata(linedata, subindent, indent_width+4)
-            block = indent + "Commands:\n" \
-                    + '\n'.join(lines) + "\n\n"
-            help = help.replace(indent+marker+suffix, block, 1)
-        return help
-
-    def _gen_names_and_attrs(self):
-        # Inheritance says we have to look in class and
-        # base classes; order is not important.
-        names = []
-        classes = [self.__class__]
-        while classes:
-            aclass = classes.pop(0)
-            if aclass.__bases__:
-                classes = classes + list(aclass.__bases__)
-            for name in dir(aclass):
-                yield (name, getattr(aclass, name))
-
-    def _help_preprocess_help_list(self, help, cmdname=None):
-        marker = "${help_list}"
-        indent, indent_width = _get_indent(marker, help)
-        suffix = _get_trailing_whitespace(marker, help)
-
-        # Determine the additional help topics, if any.
-        helpnames = {}
-        token2cmdname = self._get_canonical_map()
-        for attrname, attr in self._gen_names_and_attrs():
-            if not attrname.startswith("help_"): continue
-            helpname = attrname[5:]
-            if helpname not in token2cmdname:
-                helpnames[helpname] = attr
-
-        if helpnames:
-            linedata = [(n, a.__doc__ or "") for n, a in helpnames.items()]
-            linedata.sort()
-
-            subindent = indent + ' '*4
-            lines = _format_linedata(linedata, subindent, indent_width+4)
-            block = (indent
-                    + "Additional help topics (run `%s help TOPIC'):\n" % self.name
-                    + '\n'.join(lines)
-                    + "\n\n")
-        else:
-            block = ''
-        help = help.replace(indent+marker+suffix, block, 1)
-        return help
-
-    def _help_preprocess_cmd_name(self, help, cmdname=None):
-        marker = "${cmd_name}"
-        handler = self._get_cmd_handler(cmdname)
-        if not handler:
-            raise CmdlnError("cannot preprocess '%s' into help string: "
-                             "could not find command handler for %r" 
-                             % (marker, cmdname))
-        s = cmdname
-        if hasattr(handler, "aliases"):
-            s += " (%s)" % (", ".join(handler.aliases))
-        help = help.replace(marker, s)
-        return help
-
-    #TODO: this only makes sense as part of the Cmdln class.
-    #      Add hooks to add help preprocessing template vars and put
-    #      this one on that class.
-    def _help_preprocess_cmd_usage(self, help, cmdname=None):
-        marker = "${cmd_usage}"
-        handler = self._get_cmd_handler(cmdname)
-        if not handler:
-            raise CmdlnError("cannot preprocess '%s' into help string: "
-                             "could not find command handler for %r" 
-                             % (marker, cmdname))
-        indent, indent_width = _get_indent(marker, help)
-        suffix = _get_trailing_whitespace(marker, help)
-
-        # Extract the introspection bits we need.
-        func = handler.im_func
-        if func.func_defaults:
-            func_defaults = list(func.func_defaults)
-        else:
-            func_defaults = []
-        co_argcount = func.func_code.co_argcount
-        co_varnames = func.func_code.co_varnames
-        co_flags = func.func_code.co_flags
-        CO_FLAGS_ARGS = 4
-        CO_FLAGS_KWARGS = 8
-
-        # Adjust argcount for possible *args and **kwargs arguments.
-        argcount = co_argcount
-        if co_flags & CO_FLAGS_ARGS:   argcount += 1
-        if co_flags & CO_FLAGS_KWARGS: argcount += 1
-
-        # Determine the usage string.
-        usage = "%s %s" % (self.name, cmdname)
-        if argcount <= 2:   # handler ::= do_FOO(self, argv)
-            usage += " [ARGS...]"
-        elif argcount >= 3: # handler ::= do_FOO(self, subcmd, opts, ...)
-            argnames = list(co_varnames[3:argcount])
-            tail = ""
-            if co_flags & CO_FLAGS_KWARGS:
-                name = argnames.pop(-1)
-                import warnings
-                # There is no generally accepted mechanism for passing
-                # keyword arguments from the command line. Could
-                # *perhaps* consider: arg=value arg2=value2 ...
-                warnings.warn("argument '**%s' on '%s.%s' command "
-                              "handler will never get values" 
-                              % (name, self.__class__.__name__,
-                                 func.func_name))
-            if co_flags & CO_FLAGS_ARGS:
-                name = argnames.pop(-1)
-                tail = "[%s...]" % name.upper()
-            while func_defaults:
-                func_defaults.pop(-1)
-                name = argnames.pop(-1)
-                tail = "[%s%s%s]" % (name.upper(), (tail and ' ' or ''), tail)
-            while argnames:
-                name = argnames.pop(-1)
-                tail = "%s %s" % (name.upper(), tail)
-            usage += ' ' + tail
-
-        block_lines = [
-            self.helpindent + "Usage:",
-            self.helpindent + ' '*4 + usage
-        ]
-        block = '\n'.join(block_lines) + '\n\n'
-
-        help = help.replace(indent+marker+suffix, block, 1)
-        return help
-
-    #TODO: this only makes sense as part of the Cmdln class.
-    #      Add hooks to add help preprocessing template vars and put
-    #      this one on that class.
-    def _help_preprocess_cmd_option_list(self, help, cmdname=None):
-        marker = "${cmd_option_list}"
-        handler = self._get_cmd_handler(cmdname)
-        if not handler:
-            raise CmdlnError("cannot preprocess '%s' into help string: "
-                             "could not find command handler for %r" 
-                             % (marker, cmdname))
-        indent, indent_width = _get_indent(marker, help)
-        suffix = _get_trailing_whitespace(marker, help)
-        if hasattr(handler, "optparser"):
-            # Setup formatting options and format.
-            # - Indentation of 4 is better than optparse default of 2.
-            #   C.f. Damian Conway's discussion of this in Perl Best
-            #   Practices.
-            handler.optparser.formatter.indent_increment = 4
-            handler.optparser.formatter.current_indent = indent_width
-            block = handler.optparser.format_option_help() + '\n'
-        else:
-            block = ""
-
-        help = help.replace(indent+marker+suffix, block, 1)
-        return help
-
-    def _get_canonical_cmd_name(self, token):
-        map = self._get_canonical_map()
-        return map.get(token, None)
-
-    def _get_canonical_map(self):
-        """Return a mapping of available command names and aliases to
-        their canonical command name.
-        """
-        cacheattr = "_token2canonical"
-        if not hasattr(self, cacheattr):
-            # Get the list of commands and their aliases, if any.
-            token2canonical = {}
-            cmd2funcname = {} # use a dict to strip duplicates
-            for attr in self.get_names():
-                if attr.startswith("do_"):    cmdname = attr[3:]
-                elif attr.startswith("_do_"): cmdname = attr[4:]
-                else:
-                    continue
-                cmd2funcname[cmdname] = attr
-                token2canonical[cmdname] = cmdname
-            for cmdname, funcname in cmd2funcname.items(): # add aliases
-                func = getattr(self, funcname)
-                aliases = getattr(func, "aliases", [])
-                for alias in aliases:
-                    if alias in cmd2funcname:
-                        import warnings
-                        warnings.warn("'%s' alias for '%s' command conflicts "
-                                      "with '%s' handler"
-                                      % (alias, cmdname, cmd2funcname[alias]))
-                        continue
-                    token2canonical[alias] = cmdname
-            setattr(self, cacheattr, token2canonical)
-        return getattr(self, cacheattr)
-
-    def _get_cmd_handler(self, cmdname):
-        handler = None
-        try:
-            handler = getattr(self, 'do_' + cmdname)
-        except AttributeError:
-            try:
-                # Private command handlers begin with "_do_".
-                handler = getattr(self, '_do_' + cmdname)
-            except AttributeError:
-                pass
-        return handler
-
-    def _do_EOF(self, argv):
-        # Default EOF handler
-        # Note: an actual EOF is redirected to this command.
-        #TODO: separate name for this. Currently it is available from
-        #      command-line. Is that okay?
-        self.stdout.write('\n')
-        self.stdout.flush()
-        self.stop = True
-
-    def emptyline(self):
-        # Different from cmd.Cmd: don't repeat the last command for an
-        # emptyline.
-        if self.cmdlooping:
-            pass
-        else:
-            return self.do_help(["help"])
-
-
-#---- optparse.py extension to fix (IMO) some deficiencies
-#
-# See the class _OptionParserEx docstring for details.
-#
-
-class StopOptionProcessing(Exception):
-    """Indicate that option *and argument* processing should stop
-    cleanly. This is not an error condition. It is similar in spirit to
-    StopIteration. This is raised by _OptionParserEx's default "help"
-    and "version" option actions and can be raised by custom option
-    callbacks too.
-    
-    Hence the typical CmdlnOptionParser (a subclass of _OptionParserEx)
-    usage is:
-
-        parser = CmdlnOptionParser(mycmd)
-        parser.add_option("-f", "--force", dest="force")
-        ...
-        try:
-            opts, args = parser.parse_args()
-        except StopOptionProcessing:
-            # normal termination, "--help" was probably given
-            sys.exit(0)
-    """
-
-class _OptionParserEx(optparse.OptionParser):
-    """An optparse.OptionParser that uses exceptions instead of sys.exit.
-
-    This class is an extension of optparse.OptionParser that differs
-    as follows:
-    - Correct (IMO) the default OptionParser error handling to never
-      sys.exit(). Instead OptParseError exceptions are passed through.
-    - Add the StopOptionProcessing exception (a la StopIteration) to
-      indicate normal termination of option processing.
-      See StopOptionProcessing's docstring for details.
-
-    I'd also like to see the following in the core optparse.py, perhaps
-    as a RawOptionParser which would serve as a base class for the more
-    generally used OptionParser (that works as current):
-    - Remove the implicit addition of the -h|--help and --version
-      options. They can get in the way (e.g. if you want '-?' and '-V' for
-      these as well) and it is not hard to do:
-        optparser.add_option("-h", "--help", action="help")
-        optparser.add_option("--version", action="version")
-      These are good practices, just not valid defaults if they can
-      get in the way.
-    """
-    def error(self, msg):
-        raise optparse.OptParseError(msg)
-
-    def exit(self, status=0, msg=None):
-        if status == 0:
-            raise StopOptionProcessing(msg)
-        else:
-            #TODO: don't lose status info here
-            raise optparse.OptParseError(msg)
-
-
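# A minimal sketch (not from cmdln.py) of how the exception-based parser above
# is meant to be driven: error() raises OptParseError and exit(0) raises
# StopOptionProcessing, so the caller, not optparse, decides what happens next.
import optparse, sys

parser = _OptionParserEx(prog="example")
parser.add_option("-f", "--force", action="store_true", dest="force")
try:
    opts, args = parser.parse_args(sys.argv[1:])
except StopOptionProcessing:
    sys.exit(0)   # e.g. the implicit "--help" was handled normally
except optparse.OptParseError, ex:
    sys.stderr.write("error: %s\n" % ex)
    sys.exit(1)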
-
-#---- optparse.py-based option processing support
-
-class CmdlnOptionParser(_OptionParserEx):
-    """An optparse.OptionParser class more appropriate for top-level
-    Cmdln options. For parsing of sub-command options, see
-    SubCmdOptionParser.
-
-    Changes:
-    - disable_interspersed_args() by default, because a Cmdln instance
-      has sub-commands which may themselves have options.
-    - Redirect print_help() to the Cmdln.do_help() which is better
-      equipped to handle the "help" action.
-    - error() will raise a CmdlnUserError: OptionParser.error() is meant
-      to be called for user errors. Raising a well-known error here can
-      make error handling clearer.
-    - Also see the changes in _OptionParserEx.
-    """
-    def __init__(self, cmdln, **kwargs):
-        self.cmdln = cmdln
-        kwargs["prog"] = self.cmdln.name
-        _OptionParserEx.__init__(self, **kwargs)
-        self.disable_interspersed_args()
-
-    def print_help(self, file=None):
-        self.cmdln.onecmd(["help"])
-
-    def error(self, msg):
-        raise CmdlnUserError(msg)
-
-
-class SubCmdOptionParser(_OptionParserEx):
-    def set_cmdln_info(self, cmdln, subcmd):
-        """Called by Cmdln to pass relevant info about itself needed
-        for print_help().
-        """
-        self.cmdln = cmdln
-        self.subcmd = subcmd
-
-    def print_help(self, file=None):
-        self.cmdln.onecmd(["help", self.subcmd])
-
-    def error(self, msg):
-        raise CmdlnUserError(msg)
-
-
-def option(*args, **kwargs):
-    """Decorator to add an option to the optparser argument of a Cmdln
-    subcommand.
-    
-    Example:
-        class MyShell(cmdln.Cmdln):
-            @cmdln.option("-f", "--force", help="force removal")
-            def do_remove(self, subcmd, opts, *args):
-                #...
-    """
-    #XXX Is there a possible optimization for many options to not have a
-    #    large stack depth here?
-    def decorate(f):
-        if not hasattr(f, "optparser"):
-            f.optparser = SubCmdOptionParser()
-        f.optparser.add_option(*args, **kwargs)
-        return f
-    return decorate
-
-
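# Sketch (not from the module) of the state the decorator builds up: stacked
# @option calls lazily attach a single SubCmdOptionParser to the handler and
# register one option apiece, for Cmdln._dispatch_cmd (below) to make use of.
@option("-f", "--force", action="store_true", dest="force")
@option("-n", "--dry-run", action="store_true", dest="dry_run")
def do_remove(self, subcmd, opts, *paths):
    pass

assert isinstance(do_remove.optparser, SubCmdOptionParser)
# both "--force" and "--dry-run" are now registered on do_remove.optparser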
-class Cmdln(RawCmdln):
-    """An improved (on cmd.Cmd) framework for building multi-subcommand
-    scripts (think "svn" & "cvs") and simple shells (think "pdb" and
-    "gdb").
-
-    A simple example:
-
-        import cmdln
-
-        class MySVN(cmdln.Cmdln):
-            name = "svn"
-
-            @cmdln.aliases('stat', 'st')
-            @cmdln.option('-v', '--verbose', action='store_true',
-                          help='print verbose information')
-            def do_status(self, subcmd, opts, *paths):
-                print "handle 'svn status' command"
-
-            #...
-
-        if __name__ == "__main__":
-            shell = MySVN()
-            retval = shell.main()
-            sys.exit(retval)
-
-    'Cmdln' extends 'RawCmdln' by providing optparse option processing
-    integration.  See this class' _dispatch_cmd() docstring and
-    <http://trentm.com/projects/cmdln> for more information.
-    """
-    def _dispatch_cmd(self, handler, argv):
-        """Introspect sub-command handler signature to determine how to
-        dispatch the command. The raw handler provided by the base
-        'RawCmdln' class is still supported:
-
-            def do_foo(self, argv):
-                # 'argv' is the vector of command line args, argv[0] is
-                # the command name itself (i.e. "foo" or an alias)
-                pass
-
-        In addition, if the handler has more than 2 arguments option
-        processing is automatically done (using optparse):
-
-            @cmdln.option('-v', '--verbose', action='store_true')
-            def do_bar(self, subcmd, opts, *args):
-                # subcmd = <"bar" or an alias>
-                # opts = <an optparse.Values instance>
-                if opts.verbose:
-                    print "lots of debugging output..."
-                # args = <tuple of arguments>
-                for arg in args:
-                    bar(arg)
-
-        TODO: explain that "*args" can be other signatures as well.
-
-        The `cmdln.option` decorator corresponds to an `add_option()`
-        method call on an `optparse.OptionParser` instance.
-
-        You can declare a specific number of arguments:
-
-            @cmdln.option('-v', '--verbose', action='store_true')
-            def do_bar2(self, subcmd, opts, bar_one, bar_two):
-                #...
-
-        and an appropriate error message will be raised/printed if the
-        command is called with a different number of args.
-        """
-        co_argcount = handler.im_func.func_code.co_argcount
-        if co_argcount == 2:   # handler ::= do_foo(self, argv)
-            return handler(argv)
-        elif co_argcount >= 3: # handler ::= do_foo(self, subcmd, opts, ...)
-            try:
-                optparser = handler.optparser
-            except AttributeError:
-                optparser = handler.im_func.optparser = SubCmdOptionParser()
-            assert isinstance(optparser, SubCmdOptionParser)
-            optparser.set_cmdln_info(self, argv[0])
-            try:
-                opts, args = optparser.parse_args(argv[1:])
-            except StopOptionProcessing:
-                #TODO: this doesn't really fly for a replacement of
-                #      optparse.py behaviour, does it?
-                return 0 # Normal command termination
-
-            try:
-                return handler(argv[0], opts, *args)
-            except TypeError, ex:
-                # Some TypeError's are user errors:
-                #   do_foo() takes at least 4 arguments (3 given)
-                #   do_foo() takes at most 5 arguments (6 given)
-                #   do_foo() takes exactly 5 arguments (6 given)
-                # Raise CmdlnUserError for these with a suitably
-                # massaged error message.
-                import sys
-                tb = sys.exc_info()[2] # the traceback object
-                if tb.tb_next is not None:
-                    # If the traceback is more than one level deep, then the
-                    # TypeError did *not* happen on the "handler(...)" call
-                    # above. In that case we don't want to handle it specially
-                    # here: it would falsely mask deeper code errors.
-                    raise
-                msg = ex.args[0]
-                match = _INCORRECT_NUM_ARGS_RE.search(msg)
-                if match:
-                    msg = list(match.groups())
-                    msg[1] = int(msg[1]) - 3
-                    if msg[1] == 1:
-                        msg[2] = msg[2].replace("arguments", "argument")
-                    msg[3] = int(msg[3]) - 3
-                    msg = ''.join(map(str, msg))
-                    raise CmdlnUserError(msg)
-                else:
-                    raise
-        else:
-            raise CmdlnError("incorrect argcount for %s(): takes %d, must "
-                             "take 2 for 'argv' signature or 3+ for 'opts' "
-                             "signature" % (handler.__name__, co_argcount))
-        
-
-
-#---- internal support functions
-
-def _format_linedata(linedata, indent, indent_width):
-    """Format specific linedata into a pleasant layout.
-    
-        "linedata" is a list of 2-tuples of the form:
-            (<item-display-string>, <item-docstring>)
-        "indent" is a string to use for one level of indentation
-        "indent_width" is a number of columns by which the
-            formatted data will be indented when printed.
-
-    The <item-display-string> column is held to 15 columns.
-    """
-    lines = []
-    WIDTH = 78 - indent_width
-    SPACING = 2
-    NAME_WIDTH_LOWER_BOUND = 13
-    NAME_WIDTH_UPPER_BOUND = 16
-    NAME_WIDTH = max([len(s) for s,d in linedata])
-    if NAME_WIDTH < NAME_WIDTH_LOWER_BOUND:
-        NAME_WIDTH = NAME_WIDTH_LOWER_BOUND
-    else:
-        NAME_WIDTH = NAME_WIDTH_UPPER_BOUND
-
-    DOC_WIDTH = WIDTH - NAME_WIDTH - SPACING
-    for namestr, doc in linedata:
-        line = indent + namestr
-        if len(namestr) <= NAME_WIDTH:
-            line += ' ' * (NAME_WIDTH + SPACING - len(namestr))
-        else:
-            lines.append(line)
-            line = indent + ' ' * (NAME_WIDTH + SPACING)
-        line += _summarize_doc(doc, DOC_WIDTH)
-        lines.append(line.rstrip())
-    return lines
-
-def _summarize_doc(doc, length=60):
-    r"""Parse out a short one line summary from the given doclines.
-    
-        "doc" is the doc string to summarize.
-        "length" is the max length for the summary
-
-    >>> _summarize_doc("this function does this")
-    'this function does this'
-    >>> _summarize_doc("this function does this", 10)
-    'this fu...'
-    >>> _summarize_doc("this function does this\nand that")
-    'this function does this and that'
-    >>> _summarize_doc("this function does this\n\nand that")
-    'this function does this'
-    """
-    import re
-    if doc is None:
-        return ""
-    assert length > 3, "length <= 3 is absurdly short for a doc summary"
-    doclines = doc.strip().splitlines(0)
-    if not doclines:
-        return ""
-
-    summlines = []
-    for i, line in enumerate(doclines):
-        stripped = line.strip()
-        if not stripped:
-            break
-        summlines.append(stripped)
-        if len(''.join(summlines)) >= length:
-            break
-
-    summary = ' '.join(summlines)
-    if len(summary) > length:
-        summary = summary[:length-3] + "..." 
-    return summary
-
-
-def line2argv(line):
-    r"""Parse the given line into an argument vector.
-    
-        "line" is the line of input to parse.
-
-    This may get niggly when dealing with quoting and escaping. The
-    current state of this parsing may not be completely thorough/correct
-    in this respect.
-    
-    >>> from cmdln import line2argv
-    >>> line2argv("foo")
-    ['foo']
-    >>> line2argv("foo bar")
-    ['foo', 'bar']
-    >>> line2argv("foo bar ")
-    ['foo', 'bar']
-    >>> line2argv(" foo bar")
-    ['foo', 'bar']
-
-    Quote handling:
-    
-    >>> line2argv("'foo bar'")
-    ['foo bar']
-    >>> line2argv('"foo bar"')
-    ['foo bar']
-    >>> line2argv(r'"foo\"bar"')
-    ['foo"bar']
-    >>> line2argv("'foo bar' spam")
-    ['foo bar', 'spam']
-    >>> line2argv("'foo 'bar spam")
-    ['foo bar', 'spam']
-    
-    >>> line2argv('some\tsimple\ttests')
-    ['some', 'simple', 'tests']
-    >>> line2argv('a "more complex" test')
-    ['a', 'more complex', 'test']
-    >>> line2argv('a more="complex test of " quotes')
-    ['a', 'more=complex test of ', 'quotes']
-    >>> line2argv('a more" complex test of " quotes')
-    ['a', 'more complex test of ', 'quotes']
-    >>> line2argv('an "embedded \\"quote\\""')
-    ['an', 'embedded "quote"']
-
-    # Komodo bug 48027
-    >>> line2argv('foo bar C:\\')
-    ['foo', 'bar', 'C:\\']
-
-    # Komodo change 127581
-    >>> line2argv(r'"\test\slash" "foo bar" "foo\"bar"')
-    ['\\test\\slash', 'foo bar', 'foo"bar']
-
-    # Komodo change 127629
-    >>> if sys.platform == "win32":
-    ...     line2argv(r'\foo\bar') == ['\\foo\\bar']
-    ...     line2argv(r'\\foo\\bar') == ['\\\\foo\\\\bar']
-    ...     line2argv('"foo') == ['foo']
-    ... else:
-    ...     line2argv(r'\foo\bar') == ['foobar']
-    ...     line2argv(r'\\foo\\bar') == ['\\foo\\bar']
-    ...     try:
-    ...         line2argv('"foo')
-    ...     except ValueError, ex:
-    ...         "not terminated" in str(ex)
-    True
-    True
-    True
-    """
-    import string
-    line = line.strip()
-    argv = []
-    state = "default"
-    arg = None  # the current argument being parsed
-    i = -1
-    while 1:
-        i += 1
-        if i >= len(line): break
-        ch = line[i]
-
-        if ch == "\\" and i+1 < len(line):
-            # escaped char always added to arg, regardless of state
-            if arg is None: arg = ""
-            if (sys.platform == "win32"
-                or state in ("double-quoted", "single-quoted")
-               ) and line[i+1] not in tuple('"\''):
-                arg += ch
-            i += 1
-            arg += line[i]
-            continue
-
-        if state == "single-quoted":
-            if ch == "'":
-                state = "default"
-            else:
-                arg += ch
-        elif state == "double-quoted":
-            if ch == '"':
-                state = "default"
-            else:
-                arg += ch
-        elif state == "default":
-            if ch == '"':
-                if arg is None: arg = ""
-                state = "double-quoted"
-            elif ch == "'":
-                if arg is None: arg = ""
-                state = "single-quoted"
-            elif ch in string.whitespace:
-                if arg is not None:
-                    argv.append(arg)
-                arg = None
-            else:
-                if arg is None: arg = ""
-                arg += ch
-    if arg is not None:
-        argv.append(arg)
-    if not sys.platform == "win32" and state != "default":
-        raise ValueError("command line is not terminated: unfinished %s "
-                         "segment" % state)
-    return argv
-
-
-def argv2line(argv):
-    r"""Put together the given argument vector into a command line.
-    
-        "argv" is the argument vector to process.
-    
-    >>> from cmdln import argv2line
-    >>> argv2line(['foo'])
-    'foo'
-    >>> argv2line(['foo', 'bar'])
-    'foo bar'
-    >>> argv2line(['foo', 'bar baz'])
-    'foo "bar baz"'
-    >>> argv2line(['foo"bar'])
-    'foo"bar'
-    >>> print argv2line(['foo" bar'])
-    'foo" bar'
-    >>> print argv2line(["foo' bar"])
-    "foo' bar"
-    >>> argv2line(["foo'bar"])
-    "foo'bar"
-    """
-    escapedArgs = []
-    for arg in argv:
-        if ' ' in arg and '"' not in arg:
-            arg = '"'+arg+'"'
-        elif ' ' in arg and "'" not in arg:
-            arg = "'"+arg+"'"
-        elif ' ' in arg:
-            arg = arg.replace('"', r'\"')
-            arg = '"'+arg+'"'
-        escapedArgs.append(arg)
-    return ' '.join(escapedArgs)
-
-
-# Recipe: dedent (0.1) in /Users/trentm/tm/recipes/cookbook
-def _dedentlines(lines, tabsize=8, skip_first_line=False):
-    """_dedentlines(lines, tabsize=8, skip_first_line=False) -> dedented lines
-    
-        "lines" is a list of lines to dedent.
-        "tabsize" is the tab width to use for indent width calculations.
-        "skip_first_line" is a boolean indicating if the first line should
-            be skipped for calculating the indent width and for dedenting.
-            This is sometimes useful for docstrings and similar.
-    
-    Same as dedent() except operates on a sequence of lines. Note: the
-    lines list is modified **in-place**.
-    """
-    DEBUG = False
-    if DEBUG: 
-        print "dedent: dedent(..., tabsize=%d, skip_first_line=%r)"\
-              % (tabsize, skip_first_line)
-    indents = []
-    margin = None
-    for i, line in enumerate(lines):
-        if i == 0 and skip_first_line: continue
-        indent = 0
-        for ch in line:
-            if ch == ' ':
-                indent += 1
-            elif ch == '\t':
-                indent += tabsize - (indent % tabsize)
-            elif ch in '\r\n':
-                continue # skip all-whitespace lines
-            else:
-                break
-        else:
-            continue # skip all-whitespace lines
-        if DEBUG: print "dedent: indent=%d: %r" % (indent, line)
-        if margin is None:
-            margin = indent
-        else:
-            margin = min(margin, indent)
-    if DEBUG: print "dedent: margin=%r" % margin
-
-    if margin is not None and margin > 0:
-        for i, line in enumerate(lines):
-            if i == 0 and skip_first_line: continue
-            removed = 0
-            for j, ch in enumerate(line):
-                if ch == ' ':
-                    removed += 1
-                elif ch == '\t':
-                    removed += tabsize - (removed % tabsize)
-                elif ch in '\r\n':
-                    if DEBUG: print "dedent: %r: EOL -> strip up to EOL" % line
-                    lines[i] = lines[i][j:]
-                    break
-                else:
-                    raise ValueError("unexpected non-whitespace char %r in "
-                                     "line %r while removing %d-space margin"
-                                     % (ch, line, margin))
-                if DEBUG:
-                    print "dedent: %r: %r -> removed %d/%d"\
-                          % (line, ch, removed, margin)
-                if removed == margin:
-                    lines[i] = lines[i][j+1:]
-                    break
-                elif removed > margin:
-                    lines[i] = ' '*(removed-margin) + lines[i][j+1:]
-                    break
-    return lines
-
-def _dedent(text, tabsize=8, skip_first_line=False):
-    """_dedent(text, tabsize=8, skip_first_line=False) -> dedented text
-
-        "text" is the text to dedent.
-        "tabsize" is the tab width to use for indent width calculations.
-        "skip_first_line" is a boolean indicating if the first line should
-            be skipped for calculating the indent width and for dedenting.
-            This is sometimes useful for docstrings and similar.
-    
-    textwrap.dedent(s), but don't expand tabs to spaces
-    """
-    lines = text.splitlines(1)
-    _dedentlines(lines, tabsize=tabsize, skip_first_line=skip_first_line)
-    return ''.join(lines)
-
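# Worked example (illustrative) of the column-based dedent above: a 4-column
# margin is removed even when a deeper line is indented with a tab, which
# textwrap.dedent would leave untouched (it sees no common string prefix).
text = "    foo\n\tbar\n"
assert _dedent(text) == "foo\n    bar\n"   # the 8-column tab keeps its 4 leftover columns as spaces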
-
-def _get_indent(marker, s, tab_width=8):
-    """_get_indent(marker, s, tab_width=8) ->
-        (<indentation-of-'marker'>, <indentation-width>)"""
-    # Figure out how much the marker is indented.
-    INDENT_CHARS = tuple(' \t')
-    start = s.index(marker)
-    i = start
-    while i > 0:
-        if s[i-1] not in INDENT_CHARS:
-            break
-        i -= 1
-    indent = s[i:start]
-    indent_width = 0
-    for ch in indent:
-        if ch == ' ':
-            indent_width += 1
-        elif ch == '\t':
-            indent_width += tab_width - (indent_width % tab_width)
-    return indent, indent_width
-
-def _get_trailing_whitespace(marker, s):
-    """Return the whitespace content trailing the given 'marker' in string 's',
-    up to and including a newline.
-    """
-    suffix = ''
-    start = s.index(marker) + len(marker)
-    i = start
-    while i < len(s):
-        if s[i] in ' \t':
-            suffix += s[i]
-        elif s[i] in '\r\n':
-            suffix += s[i]
-            if s[i] == '\r' and i+1 < len(s) and s[i+1] == '\n':
-                suffix += s[i+1]
-            break
-        else:
-            break
-        i += 1
-    return suffix
-
-
-
-#---- bash completion support
-# Note: This is still experimental. I expect to change this
-# significantly.
-#
-# To get Bash completion for a cmdln.Cmdln class, run the following
-# bash command:
-#   $ complete -C 'python -m cmdln /path/to/script.py CmdlnClass' cmdname
-# For example:
-#   $ complete -C 'python -m cmdln ~/bin/svn.py SVN' svn
-#
-#TODO: Simplify the above so you don't have to give the path to the script (try to
-#      find it on PATH, if possible). Could also make class name
-#      optional if there is only one in the module (common case).
-
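# When completion fires, bash runs this module with five extra argv entries
# and the standard completion environment variables, roughly:
#
#   COMP_LINE="svn st" COMP_POINT=6 \
#       python -m cmdln ~/bin/svn.py SVN svn st svn
#
# i.e. script path and class name, then the command name, the word being
# completed and the preceding word appended by bash's `complete -C` protocol.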
-if __name__ == "__main__" and len(sys.argv) == 6:
-    def _log(s):
-        return # no-op, comment out for debugging
-        from os.path import expanduser
-        fout = open(expanduser("~/tmp/bashcpln.log"), 'a')
-        fout.write(str(s) + '\n')
-        fout.close()
-
-    # Recipe: module_from_path (1.0.1+)
-    def _module_from_path(path):
-        import imp, os, sys
-        path = os.path.expanduser(path)
-        dir = os.path.dirname(path) or os.curdir
-        name = os.path.splitext(os.path.basename(path))[0]
-        sys.path.insert(0, dir)
-        try:
-            iinfo = imp.find_module(name, [dir])
-            return imp.load_module(name, *iinfo)
-        finally:
-            sys.path.remove(dir)
-
-    def _get_bash_cplns(script_path, class_name, cmd_name,
-                        token, preceding_token):
-        _log('--')
-        _log('get_cplns(%r, %r, %r, %r, %r)'
-             % (script_path, class_name, cmd_name, token, preceding_token))
-        comp_line = os.environ["COMP_LINE"]
-        comp_point = int(os.environ["COMP_POINT"])
-        _log("COMP_LINE: %r" % comp_line)
-        _log("COMP_POINT: %r" % comp_point)
-
-        try:
-            script = _module_from_path(script_path)
-        except ImportError, ex:
-            _log("error importing `%s': %s" % (script_path, ex))
-            return []
-        shell = getattr(script, class_name)()
-        cmd_map = shell._get_canonical_map()
-        del cmd_map["EOF"]
-
-        # Determine if completing the sub-command name.
-        parts = comp_line[:comp_point].split(None, 1)
-        _log(parts)
-        if len(parts) == 1 or not (' ' in parts[1] or '\t' in parts[1]):
-            #TODO: if parts[1].startswith('-'): handle top-level opts
-            _log("complete sub-command names")
-            matches = {}
-            for name, canon_name in cmd_map.items():
-                if name.startswith(token):
-                    matches[name] = canon_name
-            if not matches:
-                return []
-            elif len(matches) == 1:
-                return matches.keys()
-            elif len(set(matches.values())) == 1:
-                return [matches.values()[0]]
-            else:
-                return matches.keys()
-
-        # Otherwise, complete options for the given sub-command.
-        #TODO: refine this so it does the right thing with option args
-        if token.startswith('-'):
-            cmd_name = comp_line.split(None, 2)[1]
-            try:
-                cmd_canon_name = cmd_map[cmd_name]
-            except KeyError:
-                return []
-            handler = shell._get_cmd_handler(cmd_canon_name)
-            optparser = getattr(handler, "optparser", None)
-            if optparser is None:
-                optparser = SubCmdOptionParser()
-            opt_strs = []
-            for option in optparser.option_list:
-                for opt_str in option._short_opts + option._long_opts:
-                    if opt_str.startswith(token):
-                        opt_strs.append(opt_str)
-            return opt_strs
-
-        return []
-
-    for cpln in _get_bash_cplns(*sys.argv[1:]):
-        print cpln
-


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -23,10 +23,12 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+from yt.config import ytcfg
+ytcfg["yt","__command_line"] = "True"
+from yt.startup_tasks import parser, subparsers
 from yt.mods import *
 from yt.funcs import *
-import cmdln as cmdln
-import optparse, os, os.path, math, sys, time, subprocess, getpass, tempfile
+import argparse, os, os.path, math, sys, time, subprocess, getpass, tempfile
 import urllib, urllib2, base64
 
 def _fix_pf(arg):
@@ -40,13 +42,68 @@
         pf = load(arg[:-10])
     else:
         pf = load(arg)
-    if pf is None:
-        raise IOError
     return pf
 
+def _add_arg(sc, arg):
+    if isinstance(arg, types.StringTypes):
+        arg = _common_options[arg].copy()
+    argnames = []
+    if "short" in arg: argnames.append(arg.pop('short'))
+    if "long" in arg: argnames.append(arg.pop('long'))
+    sc.add_argument(*argnames, **arg)
+
+class YTCommand(object):
+    args = ()
+    name = None
+    description = ""
+    aliases = ()
+    npfs = 1
+
+    class __metaclass__(type):
+        def __init__(cls, name, b, d):
+            type.__init__(cls, name, b, d)
+            if cls.name is not None:
+                sc = subparsers.add_parser(cls.name,
+                    description = cls.description,
+                    help = cls.description)
+                sc.set_defaults(func=cls.run)
+                for arg in cls.args:
+                    _add_arg(sc, arg)
+
+    @classmethod
+    def run(cls, args):
+        self = cls()
+        # Some commands need to be run repeatedly on parameter files
+        # In fact, this is the rule and the opposite is the exception
+        # BUT, we only want to parse the arguments once.
+        if cls.npfs > 1:
+            self(args)
+        else:
+            if len(getattr(args, "pf", [])) > 1:
+                pfs = args.pf
+                for pf in pfs:
+                    args.pf = pf
+                    self(args)
+            else:
+                args.pf = getattr(args, 'pf', [None])[0]
+                self(args)
+
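# A minimal sketch (not part of this changeset) of how the machinery above is
# used.  Any YTCommand subclass that sets "name" is registered as an argparse
# sub-command by the metaclass; the top-level driver, assumed here to use the
# parser imported from yt.startup_tasks, then dispatches through the "func"
# default set above.
class YTHelloCmd(YTCommand):                 # hypothetical example command
    name = "hello"
    description = "Print the parameter file name"
    args = ("pf", )                          # resolved via _common_options below
    def __call__(self, args):
        print "hello from %s" % args.pf

args = parser.parse_args()                   # "parser" comes from yt.startup_tasks
args.func(args)                              # YTCommand.run -> YTHelloCmd.__call__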
+class GetParameterFiles(argparse.Action):
+    def __call__(self, parser, namespace, values, option_string = None):
+        if len(values) == 1:
+            pfs = values
+        elif len(values) == 2 and namespace.basename is not None:
+            pfs = ["%s%04i" % (namespace.basename, r)
+                   for r in range(int(values[0]), int(values[1]), namespace.skip) ]
+        else:
+            pfs = values
+        namespace.pf = [_fix_pf(pf) for pf in pfs]
+
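# Sketch of how the action above expands its values, assuming the sub-command
# also declares the "bn" and "skip" common options so the namespace has them:
#
#   yt stats DD0042/DD0042       ->  namespace.pf = [<loaded DD0042>]
#   yt stats -b DD -s 10 10 40   ->  namespace.pf = [<DD0010>, <DD0020>, <DD0030>]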
 _common_options = dict(
+    pf      = dict(short="pf", action=GetParameterFiles,
+                   nargs="+", help="Parameter files to run on"),
     axis    = dict(short="-a", long="--axis",
-                   action="store", type="int",
+                   action="store", type=int,
                    dest="axis", default=4,
                    help="Axis (4 for all three)"),
     log     = dict(short="-l", long="--log",
@@ -54,208 +111,173 @@
                    dest="takelog", default=True,
                    help="Take the log of the field?"),
     text    = dict(short="-t", long="--text",
-                   action="store", type="string",
+                   action="store", type=str,
                    dest="text", default=None,
                    help="Textual annotation"),
     field   = dict(short="-f", long="--field",
-                   action="store", type="string",
+                   action="store", type=str,
                    dest="field", default="Density",
                    help="Field to color by"),
     weight  = dict(short="-g", long="--weight",
-                   action="store", type="string",
+                   action="store", type=str,
                    dest="weight", default=None,
                    help="Field to weight projections with"),
-    cmap    = dict(short="", long="--colormap",
-                   action="store", type="string",
+    cmap    = dict(long="--colormap",
+                   action="store", type=str,
                    dest="cmap", default="jet",
                    help="Colormap name"),
     zlim    = dict(short="-z", long="--zlim",
-                   action="store", type="float",
+                   action="store", type=float,
                    dest="zlim", default=None,
                    nargs=2,
                    help="Color limits (min, max)"),
-    dex     = dict(short="", long="--dex",
-                   action="store", type="float",
+    dex     = dict(long="--dex",
+                   action="store", type=float,
                    dest="dex", default=None,
                    nargs=1,
                    help="Number of dex above min to display"),
     width   = dict(short="-w", long="--width",
-                   action="store", type="float",
+                   action="store", type=float,
                    dest="width", default=1.0,
                    help="Width in specified units"),
     unit    = dict(short="-u", long="--unit",
-                   action="store", type="string",
+                   action="store", type=str,
                    dest="unit", default='unitary',
                    help="Desired units"),
     center  = dict(short="-c", long="--center",
-                   action="store", type="float",
+                   action="store", type=float,
                    dest="center", default=None,
                    nargs=3,
                    help="Center, space separated (-1 -1 -1 for max)"),
     bn      = dict(short="-b", long="--basename",
-                   action="store", type="string",
+                   action="store", type=str,
                    dest="basename", default=None,
                    help="Basename of parameter files"),
     output  = dict(short="-o", long="--output",
-                   action="store", type="string",
+                   action="store", type=str,
                    dest="output", default="frames/",
                    help="Folder in which to place output images"),
     outputfn= dict(short="-o", long="--output",
-                   action="store", type="string",
+                   action="store", type=str,
                    dest="output", default=None,
                    help="File in which to place output"),
     skip    = dict(short="-s", long="--skip",
-                   action="store", type="int",
+                   action="store", type=int,
                    dest="skip", default=1,
                    help="Skip factor for outputs"),
     proj    = dict(short="-p", long="--projection",
                    action="store_true", 
                    dest="projection", default=False,
                    help="Use a projection rather than a slice"),
-    maxw    = dict(short="", long="--max-width",
-                   action="store", type="float",
+    maxw    = dict(long="--max-width",
+                   action="store", type=float,
                    dest="max_width", default=1.0,
                    help="Maximum width in code units"),
-    minw    = dict(short="", long="--min-width",
-                   action="store", type="float",
+    minw    = dict(long="--min-width",
+                   action="store", type=float,
                    dest="min_width", default=50,
                    help="Minimum width in units of smallest dx (default: 50)"),
     nframes = dict(short="-n", long="--nframes",
-                   action="store", type="int",
+                   action="store", type=int,
                    dest="nframes", default=100,
                    help="Number of frames to generate"),
-    slabw   = dict(short="", long="--slab-width",
-                   action="store", type="float",
+    slabw   = dict(long="--slab-width",
+                   action="store", type=float,
                    dest="slab_width", default=1.0,
                    help="Slab width in specified units"),
     slabu   = dict(short="-g", long="--slab-unit",
-                   action="store", type="string",
+                   action="store", type=str,
                    dest="slab_unit", default='1',
                    help="Desired units for the slab"),
-    ptype   = dict(short="", long="--particle-type",
-                   action="store", type="int",
+    ptype   = dict(long="--particle-type",
+                   action="store", type=int,
                    dest="ptype", default=2,
                    help="Particle type to select"),
-    agecut  = dict(short="", long="--age-cut",
-                   action="store", type="float",
+    agecut  = dict(long="--age-cut",
+                   action="store", type=float,
                    dest="age_filter", default=None,
                    nargs=2,
                    help="Bounds for the field to select"),
-    uboxes  = dict(short="", long="--unit-boxes",
+    uboxes  = dict(long="--unit-boxes",
                    action="store_true",
                    dest="unit_boxes",
                    help="Display helpful unit boxes"),
-    thresh  = dict(short="", long="--threshold",
-                   action="store", type="float",
+    thresh  = dict(long="--threshold",
+                   action="store", type=float,
                    dest="threshold", default=None,
                    help="Density threshold"),
-    dm_only = dict(short="", long="--all-particles",
+    dm_only = dict(long="--all-particles",
                    action="store_false", 
                    dest="dm_only", default=True,
                    help="Use all particles"),
-    grids   = dict(short="", long="--show-grids",
+    grids   = dict(long="--show-grids",
                    action="store_true",
                    dest="grids", default=False,
                    help="Show the grid boundaries"),
-    time    = dict(short="", long="--time",
+    time    = dict(long="--time",
                    action="store_true",
                    dest="time", default=False,
                    help="Print time in years on image"),
-    contours    = dict(short="", long="--contours",
-                   action="store",type="int",
+    contours    = dict(long="--contours",
+                   action="store",type=int,
                    dest="contours", default=None,
                    help="Number of Contours for Rendering"),
-    contour_width  = dict(short="", long="--contour_width",
-                   action="store",type="float",
+    contour_width  = dict(long="--contour_width",
+                   action="store",type=float,
                    dest="contour_width", default=None,
                    help="Width of gaussians used for rendering."),
-    enhance   = dict(short="", long="--enhance",
+    enhance   = dict(long="--enhance",
                    action="store_true",
                    dest="enhance", default=False,
                    help="Enhance!"),
     valrange  = dict(short="-r", long="--range",
-                   action="store", type="float",
+                   action="store", type=float,
                    dest="valrange", default=None,
                    nargs=2,
                    help="Range, space separated"),
-    up  = dict(short="", long="--up",
-                   action="store", type="float",
+    up  = dict(long="--up",
+                   action="store", type=float,
                    dest="up", default=None,
                    nargs=3,
                    help="Up, space separated"),
-    viewpoint  = dict(short="", long="--viewpoint",
-                   action="store", type="float",
+    viewpoint  = dict(long="--viewpoint",
+                   action="store", type=float,
                    dest="viewpoint", default=[1., 1., 1.],
                    nargs=3,
                    help="Viewpoint, space separated"),
-    pixels    = dict(short="", long="--pixels",
-                   action="store",type="int",
+    pixels    = dict(long="--pixels",
+                   action="store",type=int,
                    dest="pixels", default=None,
                    help="Number of Pixels for Rendering"),
-    halos   = dict(short="", long="--halos",
-                   action="store", type="string",
+    halos   = dict(long="--halos",
+                   action="store", type=str,
                    dest="halos",default="multiple",
                    help="Run halo profiler on a 'single' halo or 'multiple' halos."),
-    halo_radius = dict(short="", long="--halo_radius",
-                       action="store", type="float",
+    halo_radius = dict(long="--halo_radius",
+                       action="store", type=float,
                        dest="halo_radius",default=0.1,
                        help="Constant radius for profiling halos if using hop output files with no radius entry. Default: 0.1."),
-    halo_radius_units = dict(short="", long="--halo_radius_units",
-                             action="store", type="string",
+    halo_radius_units = dict(long="--halo_radius_units",
+                             action="store", type=str,
                              dest="halo_radius_units",default="1",
                              help="Units for radius used with --halo_radius flag. Default: '1' (code units)."),
-    halo_hop_style = dict(short="", long="--halo_hop_style",
-                          action="store", type="string",
+    halo_hop_style = dict(long="--halo_hop_style",
+                          action="store", type=str,
                           dest="halo_hop_style",default="new",
                           help="Style of hop output file.  'new' for yt_hop files and 'old' for enzo_hop files."),
-    halo_parameter_file = dict(short="", long="--halo_parameter_file",
-                               action="store", type="string",
+    halo_parameter_file = dict(long="--halo_parameter_file",
+                               action="store", type=str,
                                dest="halo_parameter_file",default=None,
                                help="HaloProfiler parameter file."),
-    make_profiles = dict(short="", long="--make_profiles",
+    make_profiles = dict(long="--make_profiles",
                          action="store_true", default=False,
                          help="Make profiles with halo profiler."),
-    make_projections = dict(short="", long="--make_projections",
+    make_projections = dict(long="--make_projections",
                             action="store_true", default=False,
                             help="Make projections with halo profiler.")
 
     )
 
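# For reference, _add_arg (defined above) consumes one of these entries by
# popping the flag names and passing the rest straight to argparse, so
#
#   _add_arg(sc, "axis")
#
# is equivalent to
#
#   sc.add_argument("-a", "--axis", action="store", type=int,
#                   dest="axis", default=4, help="Axis (4 for all three)")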
-def _add_options(parser, *options):
-    for opt in options:
-        oo = _common_options[opt].copy()
-        parser.add_option(oo.pop("short"), oo.pop("long"), **oo)
-
-def _get_parser(*options):
-    parser = optparse.OptionParser()
-    _add_options(parser, *options)
-    return parser
-
-def add_cmd_options(options):
-    opts = []
-    for option in options:
-        vals = _common_options[option].copy()
-        opts.append(([vals.pop("short"), vals.pop("long")],
-                      vals))
-    def apply_options(func):
-        for args, kwargs in opts:
-            func = cmdln.option(*args, **kwargs)(func)
-        return func
-    return apply_options
-
-def check_args(func):
-    @wraps(func)
-    def arg_iterate(self, subcmd, opts, *args):
-        if len(args) == 1:
-            pfs = args
-        elif len(args) == 2 and opts.basename is not None:
-            pfs = ["%s%04i" % (opts.basename, r)
-                   for r in range(int(args[0]), int(args[1]), opts.skip) ]
-        else: pfs = args
-        for arg in pfs:
-            func(self, subcmd, opts, arg)
-    return arg_iterate
-
 def _update_hg(path, skip_rebuild = False):
     from mercurial import hg, ui, commands
     f = open(os.path.join(path, "yt_updater.log"), "a")
@@ -355,20 +377,16 @@
     # Now we think we have our supplemental repository.
     return supp_path
 
-class YTCommands(cmdln.Cmdln):
-    name="yt"
 
-    def __init__(self, *args, **kwargs):
-        cmdln.Cmdln.__init__(self, *args, **kwargs)
-        cmdln.Cmdln.do_help.aliases.append("h")
-
-    def do_update(self, subcmd, opts):
+class YTUpdateCmd(YTCommand):
+    name = "update"
+    description = \
         """
         Update the yt installation to the most recent version
 
-        ${cmd_usage}
-        ${cmd_option_list}
         """
+
+    def __call__(self, opts):
         import pkg_resources
         yt_provider = pkg_resources.get_provider("yt")
         path = os.path.dirname(yt_provider.module_path)
@@ -385,7 +403,7 @@
                 update_supp = True
         vstring = None
         if "site-packages" not in path:
-            vstring = _get_hg_version(path)
+            vstring = get_hg_version(path)
             print
             print "The current version of the code is:"
             print
@@ -394,7 +412,7 @@
             print "---"
             print
             print "This installation CAN be automatically updated."
-            _update_hg(path)
+            update_hg(path)
             print "Updated successfully."
         else:
             print
@@ -404,19 +422,24 @@
             print "updating to the newest changeset."
             print
 
-    @cmdln.option("-u", "--update-source", action="store_true",
-                  default = False,
-                  help="Update the yt installation, if able")
-    @cmdln.option("-o", "--output-version", action="store",
+class YTInstInfoCmd(YTCommand):
+    name = "instinfo"
+    args = (
+            dict(short="-u", long="--update-source", action="store_true",
+                 default = False,
+                 help="Update the yt installation, if able"),
+            dict(short="-o", long="--output-version", action="store",
                   default = None, dest="outputfile",
-                  help="File into which the current revision number will be stored")
-    def do_instinfo(self, subcmd, opts):
+                  help="File into which the current revision number will be" +
+                       "stored")
+           )
+    description = \
         """
         Get some information about the yt installation
 
-        ${cmd_usage}
-        ${cmd_option_list}
         """
+
+    def __call__(self, opts):
         import pkg_resources
         yt_provider = pkg_resources.get_provider("yt")
         path = os.path.dirname(yt_provider.module_path)
@@ -433,7 +456,7 @@
                 update_supp = True
         vstring = None
         if "site-packages" not in path:
-            vstring = _get_hg_version(path)
+            vstring = get_hg_version(path)
             print
             print "The current version of the code is:"
             print
@@ -443,7 +466,7 @@
             print
             print "This installation CAN be automatically updated."
             if opts.update_source:  
-                _update_hg(path)
+                update_hg(path)
             print "Updated successfully."
         elif opts.update_source:
             print
@@ -455,15 +478,18 @@
         if vstring is not None and opts.outputfile is not None:
             open(opts.outputfile, "w").write(vstring)
 
-    def do_load(self, subcmd, opts, arg):
+class YTLoadCmd(YTCommand):
+    name = "load"
+    description = \
         """
         Load a single dataset into an IPython instance
 
-        ${cmd_option_list}
         """
-        try:
-            pf = _fix_pf(arg)
-        except IOError:
+
+    args = ("pf", )
+
+    def __call__(self, args):
+        if args.pf is None:
             print "Could not load file."
             sys.exit()
         import yt.mods
@@ -475,7 +501,7 @@
             api_version = '0.11'
 
         local_ns = yt.mods.__dict__.copy()
-        local_ns['pf'] = pf
+        local_ns['pf'] = args.pf
 
         if api_version == '0.10':
             shell = IPython.Shell.IPShellEmbed()
@@ -491,159 +517,177 @@
             from IPython.frontend.terminal.embed import InteractiveShellEmbed
             ipshell = InteractiveShellEmbed(config=cfg)
 
-    @add_cmd_options(['outputfn','bn','thresh','dm_only','skip'])
-    @check_args
-    def do_hop(self, subcmd, opts, arg):
+class YTHopCmd(YTCommand):
+    args = ('outputfn','bn','thresh','dm_only','skip', 'pf')
+    name = "hop"
+    description = \
         """
         Run HOP on one or more datasets
 
-        ${cmd_option_list}
         """
-        pf = _fix_pf(arg)
-        kwargs = {'dm_only' : opts.dm_only}
-        if opts.threshold is not None: kwargs['threshold'] = opts.threshold
+
+    def __call__(self, args):
+        pf = args.pf
+        kwargs = {'dm_only' : args.dm_only}
+        if args.threshold is not None: kwargs['threshold'] = args.threshold
         hop_list = HaloFinder(pf, **kwargs)
-        if opts.output is None: fn = "%s.hop" % pf
-        else: fn = opts.output
+        if args.output is None: fn = "%s.hop" % pf
+        else: fn = args.output
         hop_list.write_out(fn)
 
-    @add_cmd_options(['make_profiles','make_projections','halo_parameter_file',
-                      'halos','halo_hop_style','halo_radius','halo_radius_units'])
-    def do_halos(self, subcmd, opts, arg):
+class YTHalosCmd(YTCommand):
+    name = "halos"
+    args = ('make_profiles','make_projections','halo_parameter_file',
+            'halos','halo_hop_style','halo_radius','halo_radius_units', 'pf')
+    description = \
         """
         Run HaloProfiler on one dataset
 
-        ${cmd_option_list}
         """
+    def __call__(self, args):
         import yt.analysis_modules.halo_profiler.api as HP
-        kwargs = {'halos': opts.halos,
-                  'halo_radius': opts.halo_radius,
-                  'radius_units': opts.halo_radius_units}
+        kwargs = {'halos': args.halos,
+                  'halo_radius': args.halo_radius,
+                  'radius_units': args.halo_radius_units}
 
-        hp = HP.HaloProfiler(arg,opts.halo_parameter_file,**kwargs)
-        if opts.make_profiles:
+        hp = HP.HaloProfiler(args.pf,args.halo_parameter_file,**kwargs)
+        if args.make_profiles:
             hp.make_profiles()
-        if opts.make_projections:
+        if args.make_projections:
             hp.make_projections()
 
-    @add_cmd_options(["width", "unit", "bn", "proj", "center",
-                      "zlim", "axis", "field", "weight", "skip",
-                      "cmap", "output", "grids", "time"])
-    @check_args
-    def do_plot(self, subcmd, opts, arg):
+class YTPlotCmd(YTCommand):
+    args = ("width", "unit", "bn", "proj", "center",
+            "zlim", "axis", "field", "weight", "skip",
+            "cmap", "output", "grids", "time", "pf")
+    name = "plot"
+    
+    description = \
         """
         Create a set of images
 
-        ${cmd_usage}
-        ${cmd_option_list}
         """
-        pf = _fix_pf(arg)
-        center = opts.center
-        if opts.center == (-1,-1,-1):
+
+    def __call__(self, args):
+        pf = args.pf
+        center = args.center
+        if args.center == (-1,-1,-1):
             mylog.info("No center fed in; seeking.")
             v, center = pf.h.find_max("Density")
-        elif opts.center is None:
+        elif args.center is None:
             center = 0.5*(pf.domain_left_edge + pf.domain_right_edge)
         center = na.array(center)
         pc=PlotCollection(pf, center=center)
-        if opts.axis == 4:
+        if args.axis == 4:
             axes = range(3)
         else:
-            axes = [opts.axis]
+            axes = [args.axis]
         for ax in axes:
             mylog.info("Adding plot for axis %i", ax)
-            if opts.projection: pc.add_projection(opts.field, ax,
-                                    weight_field=opts.weight, center=center)
-            else: pc.add_slice(opts.field, ax, center=center)
-            if opts.grids: pc.plots[-1].modify["grids"]()
-            if opts.time: 
+            if args.projection: pc.add_projection(args.field, ax,
+                                    weight_field=args.weight, center=center)
+            else: pc.add_slice(args.field, ax, center=center)
+            if args.grids: pc.plots[-1].modify["grids"]()
+            if args.time: 
                 time = pf.current_time*pf['Time']*pf['years']
                 pc.plots[-1].modify["text"]((0.2,0.8), 't = %5.2e yr'%time)
-        pc.set_width(opts.width, opts.unit)
-        pc.set_cmap(opts.cmap)
-        if opts.zlim: pc.set_zlim(*opts.zlim)
-        if not os.path.isdir(opts.output): os.makedirs(opts.output)
-        pc.save(os.path.join(opts.output,"%s" % (pf)))
+        pc.set_width(args.width, args.unit)
+        pc.set_cmap(args.cmap)
+        if args.zlim: pc.set_zlim(*args.zlim)
+        if not os.path.isdir(args.output): os.makedirs(args.output)
+        pc.save(os.path.join(args.output,"%s" % (pf)))
 
-    @add_cmd_options(["proj", "field", "weight"])
-    @cmdln.option("-a", "--axis", action="store", type="int",
-                   dest="axis", default=0, help="Axis (4 for all three)")
-    @cmdln.option("-o", "--host", action="store", type="string",
-                   dest="host", default=None, help="IP Address to bind on")
-    @check_args
-    def do_mapserver(self, subcmd, opts, arg):
+class YTMapserverCmd(YTCommand):
+    args = ("proj", "field", "weight",
+            dict(short="-a", long="--axis", action="store", type=int,
+                 dest="axis", default=0, help="Axis (4 for all three)"),
+            dict(short ="-o", long="--host", action="store", type=str,
+                   dest="host", default=None, help="IP Address to bind on"),
+            "pf",
+            )
+    
+    name = "mapserver"
+    description = \
         """
         Serve a plot in a GMaps-style interface
 
-        ${cmd_usage}
-        ${cmd_option_list}
         """
-        pf = _fix_pf(arg)
+
+    def __call__(self, args):
+        pf = args.pf
         pc=PlotCollection(pf, center=0.5*(pf.domain_left_edge +
                                           pf.domain_right_edge))
-        if opts.axis == 4:
+        if args.axis == 4:
             print "Doesn't work with multiple axes!"
             return
-        if opts.projection:
-            p = pc.add_projection(opts.field, opts.axis, weight_field=opts.weight)
+        if args.projection:
+            p = pc.add_projection(args.field, args.axis, weight_field=args.weight)
         else:
-            p = pc.add_slice(opts.field, opts.axis)
+            p = pc.add_slice(args.field, args.axis)
         from yt.gui.reason.pannable_map import PannableMapServer
-        mapper = PannableMapServer(p.data, opts.field)
+        mapper = PannableMapServer(p.data, args.field)
         import yt.utilities.bottle as bottle
         bottle.debug(True)
-        if opts.host is not None:
-            colonpl = opts.host.find(":")
+        if args.host is not None:
+            colonpl = args.host.find(":")
             if colonpl >= 0:
-                port = int(opts.host.split(":")[-1])
-                opts.host = opts.host[:colonpl]
+                port = int(args.host.split(":")[-1])
+                args.host = args.host[:colonpl]
             else:
                 port = 8080
-            bottle.run(server='rocket', host=opts.host, port=port)
+            bottle.run(server='rocket', host=args.host, port=port)
         else:
             bottle.run(server='rocket')
 
-    def do_rpdb(self, subcmd, opts, task):
+class YTRPDBCmd(YTCommand):
+    name = "rpdb"
+    description = \
         """
         Connect to a currently running (on localhost) rpdb session.
 
         Commands run with --rpdb will trigger an rpdb session with any
         uncaught exceptions.
 
-        ${cmd_usage} 
-        ${cmd_option_list}
         """
+
+    def __call__(self, args):
         import rpdb
         rpdb.run_rpdb(int(task))
 
-    @add_cmd_options(['outputfn','bn','skip'])
-    @check_args
-    def do_stats(self, subcmd, opts, arg):
+class YTStatsCmd(YTCommand):
+    args = ('outputfn','bn','skip','pf')
+    name = "stats"
+    description = \
         """
         Print stats and maximum density for one or more datasets
 
-        ${cmd_option_list}
         """
-        pf = _fix_pf(arg)
+
+    def __call__(self, args):
+        pf = args.pf
         pf.h.print_stats()
         if "Density" in pf.h.field_list:
             v, c = pf.h.find_max("Density")
         print "Maximum density: %0.5e at %s" % (v, c)
-        if opts.output is not None:
+        if args.output is not None:
             t = pf.current_time * pf['years']
-            open(opts.output, "a").write(
+            open(args.output, "a").write(
                 "%s (%0.5e years): %0.5e at %s\n" % (pf, t, v, c))
 
-    @add_cmd_options([])
-    def _do_analyze(self, subcmd, opts, arg):
+class YTAnalyzeCmd(YTCommand):
+    
+    name = "analyze"
+    args = ('pf',)
+    description = \
         """
         Produce a set of analysis for a given output.  This includes
         HaloProfiler results with r200, as per the recipe file in the cookbook,
         profiles of a number of fields, projections of average Density and
         Temperature, and distribution functions for Density and Temperature.
 
-        ${cmd_option_list}
         """
+
+    def __call__(self, args):
         # We will do the following things:
         #   Halo profiling (default parameters ONLY)
         #   Projections: Density, Temperature
@@ -696,51 +740,48 @@
         ph.modify["line"](pr.field_data["Density"], pr.field_data["Temperature"])
         pc.save()
 
-    @cmdln.option("-d", "--desc", action="store",
-                  default = None, dest="desc",
-                  help="Description for this pasteboard entry")
-    def do_pasteboard(self, subcmd, opts, arg):
-        """
-        Place a file into your pasteboard.
-        """
-        if opts.desc is None: raise RuntimeError
-        from yt.utilities.pasteboard import PostInventory
-        pp = PostInventory()
-        pp.add_post(arg, desc=opts.desc)
-
-    @cmdln.option("-l", "--language", action="store",
+class YTPastebinCmd(YTCommand):
+    name = "pastebin"
+    args = (
+             dict(short="-l", long="--language", action="store",
                   default = None, dest="language",
-                  help="Use syntax highlighter for the file in language")
-    @cmdln.option("-L", "--languages", action="store_true",
+                  help="Use syntax highlighter for the file in language"),
+             dict(short="-L", long="--languages", action="store_true",
                   default = False, dest="languages",
-                  help="Retrive a list of supported languages")
-    @cmdln.option("-e", "--encoding", action="store",
+                  help="Retrieve a list of supported languages"),
+             dict(short="-e", long="--encoding", action="store",
                   default = 'utf-8', dest="encoding",
                   help="Specify the encoding of a file (default is "
-                        "utf-8 or guessing if available)")
-    @cmdln.option("-b", "--open-browser", action="store_true",
+                        "utf-8 or guessing if available)"),
+             dict(short="-b", long="--open-browser", action="store_true",
                   default = False, dest="open_browser",
-                  help="Open the paste in a web browser")
-    @cmdln.option("-p", "--private", action="store_true",
+                  help="Open the paste in a web browser"),
+             dict(short="-p", long="--private", action="store_true",
                   default = False, dest="private",
-                  help="Paste as private")
-    @cmdln.option("-c", "--clipboard", action="store_true",
+                  help="Paste as private"),
+             dict(short="-c", long="--clipboard", action="store_true",
                   default = False, dest="clipboard",
-                  help="File to output to; else, print.")
-    def do_pastebin(self, subcmd, opts, arg):
+                  help="Copy the paste URL to the clipboard"),
+             dict(short="file", type=str),
+            )
+    description = \
         """
         Post a script to an anonymous pastebin
 
         Usage: yt pastebin [options] <script>
 
-        ${cmd_option_list}
         """
+
+    def __call__(self, args):
         import yt.utilities.lodgeit as lo
-        lo.main( arg, languages=opts.languages, language=opts.language,
-                 encoding=opts.encoding, open_browser=opts.open_browser,
-                 private=opts.private, clipboard=opts.clipboard)
+        lo.main(args.file, languages=args.languages, language=args.language,
+                 encoding=args.encoding, open_browser=args.open_browser,
+                 private=args.private, clipboard=args.clipboard)
 
-    def do_pastebin_grab(self, subcmd, opts, arg):
+class YTPastebinGrabCmd(YTCommand):
+    args = (dict(short="number", type=str),)
+    name = "pastebin_grab"
+    description = \
         """
         Print an online pastebin to STDOUT for local use. Paste ID is 
         the number at the end of the url.  So to locally access pastebin:
@@ -750,29 +791,21 @@
         Ex: yt pastebin_grab 1688 > script.py
 
         """
+
+    def __call__(self, args):
         import yt.utilities.lodgeit as lo
-        lo.main( None, download=arg )
+        lo.main( None, download=args.number )
 
-    @cmdln.option("-o", "--output", action="store",
-                  default = None, dest="output_fn",
-                  help="File to output to; else, print.")
-    def do_pasteboard_grab(self, subcmd, opts, username, paste_id):
-        """
-        Download from your or another user's pasteboard
 
-        ${cmd_usage} 
-        ${cmd_option_list}
-        """
-        from yt.utilities.pasteboard import retrieve_pastefile
-        retrieve_pastefile(username, paste_id, opts.output_fn)
-
-    def do_bugreport(self, subcmd, opts):
+class YTBugreportCmd(YTCommand):
+    name = "bureport"
+    description = \
         """
         Report a bug in yt
 
-        ${cmd_usage} 
-        ${cmd_option_list}
         """
+
+    def __call__(self, args):
         print "==============================================================="
         print
         print "Hi there!  Welcome to the yt bugreport taker."
@@ -879,13 +912,13 @@
         print "Keep in touch!"
         print
 
-    def do_bootstrap_dev(self, subcmd, opts):
+class YTBootstrapDevCmd(YTCommand):
+    name = "bootstrap_dev"
+    description = \
         """
         Bootstrap a yt development environment
-
-        ${cmd_usage} 
-        ${cmd_option_list}
         """
+    def __call__(self, args):
         from mercurial import hg, ui, commands
         import imp
         import getpass
@@ -895,7 +928,7 @@
         print "Hi there!  Welcome to the yt development bootstrap tool."
         print
         print "This should get you started with mercurial as well as a few"
-        print "other handy things, like a pasteboard of your very own."
+        print "other handy things"
         print
         # We have to do a couple things.
         # First, we check that YT_DEST is set.
@@ -917,7 +950,6 @@
         print " 1. Setting up your ~/.hgrc to have a username."
         print " 2. Setting up your bitbucket user account and the hgbb"
         print "    extension."
-        print " 3. Setting up a new pasteboard repository."
         print
         firstname = lastname = email_address = bbusername = repo_list = None
         # Now we try to import the cedit extension.
@@ -1090,89 +1122,6 @@
         # We now reload the UI's config file so that it catches the [bb]
         # section changes.
         uu.readconfig(hgrc_path[0])
-        # Now the only thing remaining to do is to set up the pasteboard
-        # repository.
-        # This is, unfortunately, the most difficult.
-        print
-        print "We are now going to set up a pasteboard. This is a mechanism"
-        print "for versioned posting of snippets, collaboration and"
-        print "discussion."
-        print
-        # Let's get the full list of repositories
-        pasteboard_name = "%s.bitbucket.org" % (bbusername.lower())
-        if repo_list is None:
-            rv = hgbb._bb_apicall(uu, "users/%s" % bbusername, None, False)
-            rv = json.loads(rv)
-            repo_list = rv['repositories']
-        create = True
-        for repo in repo_list:
-            if repo['name'] == pasteboard_name:
-                create = False
-        if create:
-            # Now we first create the repository, but we
-            # will only use the creation API, not the bbcreate command.
-            print
-            print "I am now going to create the repository:"
-            print "    ", pasteboard_name
-            print "on BitBucket.org.  This will set up the domain"
-            print "     http://%s" % (pasteboard_name)
-            print "which will point to the current contents of the repo."
-            print
-            loki = raw_input("Press enter to go on, Ctrl-C to exit.")
-            data = dict(name=pasteboard_name)
-            hgbb._bb_apicall(uu, 'repositories', data)
-        # Now we clone
-        pasteboard_path = os.path.join(os.environ["YT_DEST"], "src",
-                                       pasteboard_name)
-        if os.path.isdir(pasteboard_path):
-            print "Found an existing clone of the pasteboard repo:"
-            print "    ", pasteboard_path
-        else:
-            print
-            print "I will now clone a copy of your pasteboard repo."
-            print
-            loki = raw_input("Press enter to go on, Ctrl-C to exit.")
-            commands.clone(uu, "https://%s@bitbucket.org/%s/%s" % (
-                             bbusername, bbusername, pasteboard_name),
-                           pasteboard_path)
-            pbtemplate_path = os.path.join(supp_path, "pasteboard_template")
-            pb_hgrc_path = os.path.join(pasteboard_path, ".hg", "hgrc")
-            cedit.config.setoption(uu, [pb_hgrc_path],
-                                   "paths.pasteboard = " + pbtemplate_path)
-            if create:
-                # We have to pull in the changesets from the pasteboard.
-                pb_repo = hg.repository(uu, pasteboard_path)
-                commands.pull(uu, pb_repo,
-                              os.path.join(supp_path, "pasteboard_template"))
-        if ytcfg.get("yt","pasteboard_repo") != pasteboard_path:
-            print
-            print "Now setting the pasteboard_repo option in"
-            print "~/.yt/config to point to %s" % (pasteboard_path)
-            print
-            loki = raw_input("Press enter to go on, Ctrl-C to exit.")
-            dotyt_path = os.path.expanduser("~/.yt")
-            if not os.path.isdir(dotyt_path):
-                print "There's no directory:"
-                print "    ", dotyt_path
-                print "I will now create it."
-                print
-                loki = raw_input("Press enter to go on, Ctrl-C to exit.")
-                os.mkdir(dotyt_path)
-            ytcfg_path = os.path.expanduser("~/.yt/config")
-            cedit.config.setoption(uu, [ytcfg_path],
-                        "yt.pasteboard_repo=%s" % (pasteboard_path))
-        try:
-            import pygments
-            install_pygments = False
-        except ImportError:
-            install_pygments = True
-        if install_pygments:
-            print "You are missing the Pygments package.  Installing."
-            import pip
-            rv = pip.main(["install", "pygments"])
-            if rv == 1:
-                print "Unable to install Pygments.  Please report this bug to yt-users."
-                sys.exit(1)
         try:
             import lxml
             install_lxml = False
@@ -1188,27 +1137,32 @@
         print
         print "All done!"
         print
-        print "You're now set up to use the 'yt pasteboard' command"
-        print "as well as develop using Mercurial and BitBucket."
+        print "You're now set up to develop using Mercurial and BitBucket."
         print
         print "Good luck!"
 
-    @cmdln.option("-o", "--open-browser", action="store_true",
-                  default = False, dest='open_browser',
-                  help="Open a web browser.")
-    @cmdln.option("-p", "--port", action="store",
-                  default = 0, dest='port',
-                  help="Port to listen on")
-    @cmdln.option("-f", "--find", action="store_true",
-                  default = False, dest="find",
-                  help="At startup, find all *.hierarchy files in the CWD")
-    @cmdln.option("-d", "--debug", action="store_true",
-                  default = False, dest="debug",
-                  help="Add a debugging mode for cell execution")
-    def do_serve(self, subcmd, opts):
+class YTServeCmd(YTCommand):
+    name = "serve"
+    args = (
+            dict(short="-o", long="--open-browser", action="store_true",
+                 default = False, dest='open_browser',
+                 help="Open a web browser."),
+            dict(short="-p", long="--port", action="store",
+                 default = 0, dest='port',
+                 help="Port to listen on"),
+            dict(short="-f", long="--find", action="store_true",
+                 default = False, dest="find",
+                 help="At startup, find all *.hierarchy files in the CWD"),
+            dict(short="-d", long="--debug", action="store_true",
+                 default = False, dest="debug",
+                 help="Add a debugging mode for cell execution")
+            )
+    description = \
         """
         Run the Web GUI Reason
         """
+
+    def __call__(self, args):
         # We have to do a couple things.
         # First, we check that YT_DEST is set.
         if "YT_DEST" not in os.environ:
@@ -1217,18 +1171,18 @@
             print "*** to point to the installation location!        ***"
             print
             sys.exit(1)
-        if opts.port == 0:
+        if args.port == 0:
             # This means, choose one at random.  We do this by binding to a
             # socket and allowing the OS to choose the port for that socket.
             import socket
             sock = socket.socket()
             sock.bind(('', 0))
-            opts.port = sock.getsockname()[-1]
+            args.port = sock.getsockname()[-1]
             del sock
-        elif opts.port == '-1':
+        elif args.port == '-1':
             port = raw_input("Desired yt port? ")
             try:
-                opts.port = int(port)
+                args.port = int(port)
             except ValueError:
                 print "Please try a number next time."
                 return 1
@@ -1246,78 +1200,32 @@
         from yt.gui.reason.extdirect_repl import ExtDirectREPL
         from yt.gui.reason.bottle_mods import uuid_serve_functions, PayloadHandler
         hr = ExtDirectREPL(base_extjs_path)
-        hr.debug = PayloadHandler.debug = opts.debug
-        if opts.find:
+        hr.debug = PayloadHandler.debug = args.debug
+        if args.find:
             # We just have to find them and store references to them.
             command_line = ["pfs = []"]
             for fn in sorted(glob.glob("*/*.hierarchy")):
                 command_line.append("pfs.append(load('%s'))" % fn[:-10])
             hr.execute("\n".join(command_line))
         bottle.debug()
-        uuid_serve_functions(open_browser=opts.open_browser,
-                    port=int(opts.port), repl=hr)
+        uuid_serve_functions(open_browser=args.open_browser,
+                    port=int(args.port), repl=hr)
 
     
-    def _do_remote(self, subcmd, opts):
-        import getpass, sys, socket, time, webbrowser
-        import yt.utilities.pexpect as pex
-
-        host = raw_input('Hostname: ')
-        user = raw_input('User: ')
-        password = getpass.getpass('Password: ')
-
-        sock = socket.socket()
-        sock.bind(('', 0))
-        port = sock.getsockname()[-1]
-        del sock
-
-        child = pex.spawn('ssh -L %s:localhost:%s -l %s %s'%(port, port, user, host))
-        ssh_newkey = 'Are you sure you want to continue connecting'
-        i = child.expect([pex.TIMEOUT, ssh_newkey, 'password: '])
-        if i == 0: # Timeout
-            print 'ERROR!'
-            print 'SSH could not login. Here is what SSH said:'
-            print child.before, child.after
-            return 1
-        if i == 1: # SSH does not have the public key. Just accept it.
-            child.sendline ('yes')
-            child.expect ('password: ')
-            i = child.expect([pex.TIMEOUT, 'password: '])
-            if i == 0: # Timeout
-                print 'ERROR!'
-                print 'SSH could not login. Here is what SSH said:'
-                print child.before, child.after
-                return 1
-        print "Sending password"
-        child.sendline(password)
-        del password
-        print "Okay, sending serving command"
-        child.sendline('yt serve -p -1')
-        print "Waiting ..."
-        child.expect('Desired yt port?')
-        child.sendline("%s" % port)
-        child.expect('     http://localhost:([0-9]*)/(.+)/\r')
-        print "Got:", child.match.group(1), child.match.group(2)
-        port, urlprefix = child.match.group(1), child.match.group(2)
-        print "Sleeping one second and opening browser"
-        time.sleep(1)
-        webbrowser.open("http://localhost:%s/%s/" % (port, urlprefix))
-        print "Press Ctrl-C to terminate session"
-        child.readlines()
-        while 1:
-            time.sleep(1)
-
-    @cmdln.option("-R", "--repo", action="store", type="string",
-                  dest="repo", default=".", help="Repository to upload")
-    def do_hubsubmit(self, subcmd, opts):
+class YTHubSubmitCmd(YTCommand):
+    name = "hub_submit"
+    args = (
+            dict(long="--repo", action="store", type=str,
+                 dest="repo", default=".", help="Repository to upload"),
+           )
+    description = \
         """
         Submit a mercurial repository to the yt Hub
         (http://hub.yt-project.org/), creating a BitBucket repo in the process
         if necessary.
+        """
 
-        ${cmd_usage}
-        ${cmd_option_list}
-        """
+    def __call__(self, args):
         import imp
         from mercurial import hg, ui, commands, error, config
         uri = "http://hub.yt-project.org/3rdparty/API/api.php"
@@ -1340,10 +1248,10 @@
             sys.exit(1)
         hgbb = imp.load_module("hgbb", *result)
         try:
-            repo = hg.repository(uu, opts.repo)
+            repo = hg.repository(uu, args.repo)
             conf = config.config()
-            if os.path.exists(os.path.join(opts.repo,".hg","hgrc")):
-                conf.read(os.path.join(opts.repo, ".hg", "hgrc"))
+            if os.path.exists(os.path.join(args.repo,".hg","hgrc")):
+                conf.read(os.path.join(args.repo, ".hg", "hgrc"))
             needs_bb = True
             if "paths" in conf.sections():
                 default = conf['paths'].get("default", "")
@@ -1358,7 +1266,7 @@
                             break
         except error.RepoError:
             print "Unable to find repo at:"
-            print "   %s" % (os.path.abspath(opts.repo))
+            print "   %s" % (os.path.abspath(args.repo))
             print
             print "Would you like to initialize one?  If this message"
             print "surprises you, you should perhaps press Ctrl-C to quit."
@@ -1369,8 +1277,8 @@
                 print "Okay, rad -- we'll let you handle it and get back to",
                 print " us."
                 return 1
-            commands.init(uu, dest=opts.repo)
-            repo = hg.repository(uu, opts.repo)
+            commands.init(uu, dest=args.repo)
+            repo = hg.repository(uu, args.repo)
             commands.add(uu, repo)
             commands.commit(uu, repo, message="Initial automated import by yt")
             needs_bb = True
@@ -1395,7 +1303,7 @@
                 print
                 print "to get set up and ready to go."
                 return 1
-            bb_repo_name = os.path.basename(os.path.abspath(opts.repo))
+            bb_repo_name = os.path.basename(os.path.abspath(args.repo))
             print
             print "I am now going to create the repository:"
             print "    ", bb_repo_name
@@ -1478,13 +1386,16 @@
         rv = urllib2.urlopen(req).read()
         print rv
 
-    def do_upload_image(self, subcmd, opts, filename):
+class YTUploadImageCmd(YTCommand):
+    args = (dict(short="file", type=str),)
+    description = \
         """
         Upload an image to imgur.com.  Must be PNG.
 
-        ${cmd_usage} 
-        ${cmd_option_list}
         """
+    name = "upload_image"
+    def __call__(self, args):
+        filename = args.file
         if not filename.endswith(".png"):
             print "File must be a PNG file!"
             return 1
@@ -1516,56 +1427,57 @@
             print
             pprint.pprint(rv)
 
-    @add_cmd_options(["width", "unit", "center","enhance",'outputfn',
-                      "field", "cmap", "contours", "viewpoint",
-                      "pixels","up","valrange","log","contour_width"])
-    @check_args
-    def do_render(self, subcmd, opts, arg):
+class YTRenderCmd(YTCommand):
+        
+    args = ("width", "unit", "center","enhance",'outputfn',
+            "field", "cmap", "contours", "viewpoint",
+            "pixels","up","valrange","log","contour_width", "pf")
+    name = "render"
+    description = \
         """
         Create a simple volume rendering
+        """
 
-        ${cmd_usage}
-        ${cmd_option_list}
-        """
-        pf = _fix_pf(arg)
-        center = opts.center
-        if opts.center == (-1,-1,-1):
+    def __call__(self, args):
+        pf = args.pf
+        center = args.center
+        if args.center == (-1,-1,-1):
             mylog.info("No center fed in; seeking.")
             v, center = pf.h.find_max("Density")
-        elif opts.center is None:
+        elif args.center is None:
             center = 0.5*(pf.domain_left_edge + pf.domain_right_edge)
         center = na.array(center)
 
-        L = opts.viewpoint
+        L = args.viewpoint
         if L is None:
             L = [1.]*3
-        L = na.array(opts.viewpoint)
+        L = na.array(args.viewpoint)
 
-        unit = opts.unit
+        unit = args.unit
         if unit is None:
             unit = '1'
-        width = opts.width
+        width = args.width
         if width is None:
             width = 0.5*(pf.domain_right_edge - pf.domain_left_edge)
         width /= pf[unit]
 
-        N = opts.pixels
+        N = args.pixels
         if N is None:
             N = 512 
         
-        up = opts.up
+        up = args.up
         if up is None:
             up = [0.,0.,1.]
             
-        field = opts.field
+        field = args.field
         if field is None:
             field = 'Density'
         
-        log = opts.takelog
+        log = args.takelog
         if log is None:
             log = True
 
-        myrange = opts.valrange
+        myrange = args.valrange
         if myrange is None:
             roi = pf.h.region(center, center-width, center+width)
             mi, ma = roi.quantities['Extrema'](field)[0]
@@ -1574,13 +1486,13 @@
         else:
             mi, ma = myrange[0], myrange[1]
 
-        n_contours = opts.contours
+        n_contours = args.contours
         if n_contours is None:
             n_contours = 7
 
-        contour_width = opts.contour_width
+        contour_width = args.contour_width
 
-        cmap = opts.cmap
+        cmap = args.cmap
         if cmap is None:
             cmap = 'jet'
         tf = ColorTransferFunction((mi-2, ma+2))
@@ -1589,12 +1501,12 @@
         cam = pf.h.camera(center, L, width, (N,N), transfer_function=tf)
         image = cam.snapshot()
 
-        if opts.enhance:
+        if args.enhance:
             for i in range(3):
                 image[:,:,i] = image[:,:,i]/(image[:,:,i].mean() + 5.*image[:,:,i].std())
             image[image>1.0]=1.0
             
-        save_name = opts.output
+        save_name = args.output
         if save_name is None:
             save_name = "%s"%pf+"_"+field+"_rendering.png"
         if not '.png' in save_name:
@@ -1604,9 +1516,7 @@
         
 
 def run_main():
-    for co in ["--parallel", "--paste"]:
-        if co in sys.argv: del sys.argv[sys.argv.index(co)]
-    YT = YTCommands()
-    sys.exit(YT.main())
+    args = parser.parse_args()
+    args.func(args)
 
 if __name__ == "__main__": run_main()


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -515,3 +515,107 @@
     vec2 /= norm2
     vec3 = na.cross(vec1, vec2)
     return vec1, vec2, vec3
+
+def quartiles(a, axis=None, out=None, overwrite_input=False):
+    """
+    Compute the quartile values (25% and 75%) along the specified axis
+    in the same way that numpy.median calculates the median (50%) value
+    along a specified axis.  Check numpy.median for details, as it is
+    virtually the same algorithm.
+
+    Returns an array of the quartiles of the array elements [lower quartile, 
+    upper quartile].
+
+    Parameters
+    ----------
+    a : array_like
+        Input array or object that can be converted to an array.
+    axis : {None, int}, optional
+        Axis along which the quartiles are computed. The default (axis=None)
+        is to compute the quartiles along a flattened version of the array.
+    out : ndarray, optional
+        Alternative output array in which to place the result. It must
+        have the same shape and buffer length as the expected output,
+        but the type (of the output) will be cast if necessary.
+    overwrite_input : {False, True}, optional
+       If True, then allow use of memory of input array (a) for
+       calculations. The input array will be modified by the call to
+       quartiles. This will save memory when you do not need to preserve
+       the contents of the input array. Treat the input as undefined,
+       but it will probably be fully or partially sorted. Default is
+       False. Note that, if `overwrite_input` is True and the input
+       is not already an ndarray, an error will be raised.
+
+    Returns
+    -------
+    quartiles : ndarray
+        A new 2D array holding the result (unless `out` is specified, in
+        which case that array is returned instead).  If the input contains
+        integers, or floats of smaller precision than 64, then the output
+        data-type is float64.  Otherwise, the output data-type is the same
+        as that of the input.
+
+    See Also
+    --------
+    numpy.median, numpy.mean, numpy.percentile
+
+    Notes
+    -----
+    Given a vector V of length N, the quartiles of V are the 25% and 75% values 
+    of a sorted copy of V, ``V_sorted`` - i.e., ``V_sorted[(N-1)/4]`` and 
+    ``3*V_sorted[(N-1)/4]``, when N is odd.  When N is even, it is the average 
+    of the two values bounding these values of ``V_sorted``.
+
+    Examples
+    --------
+    >>> a = na.arange(100).reshape(10,10)
+    >>> a
+    array([[ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9],
+           [10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
+           [20, 21, 22, 23, 24, 25, 26, 27, 28, 29],
+           [30, 31, 32, 33, 34, 35, 36, 37, 38, 39],
+           [40, 41, 42, 43, 44, 45, 46, 47, 48, 49],
+           [50, 51, 52, 53, 54, 55, 56, 57, 58, 59],
+           [60, 61, 62, 63, 64, 65, 66, 67, 68, 69],
+           [70, 71, 72, 73, 74, 75, 76, 77, 78, 79],
+           [80, 81, 82, 83, 84, 85, 86, 87, 88, 89],
+           [90, 91, 92, 93, 94, 95, 96, 97, 98, 99]])
+    >>> mu.quartiles(a)
+    array([ 24.5,  74.5])
+    >>> mu.quartiles(a,axis=0)
+    array([[ 15.,  16.,  17.,  18.,  19.,  20.,  21.,  22.,  23.,  24.],
+           [ 65.,  66.,  67.,  68.,  69.,  70.,  71.,  72.,  73.,  74.]])
+    >>> mu.quartiles(a,axis=1)
+    array([[  1.5,  11.5,  21.5,  31.5,  41.5,  51.5,  61.5,  71.5,  81.5,
+             91.5],
+           [  6.5,  16.5,  26.5,  36.5,  46.5,  56.5,  66.5,  76.5,  86.5,
+             96.5]])
+    """
+    if overwrite_input:
+        if axis is None:
+            sorted = a.ravel()
+            sorted.sort()
+        else:
+            a.sort(axis=axis)
+            sorted = a
+    else:
+        sorted = na.sort(a, axis=axis)
+    if axis is None:
+        axis = 0
+    indexer = [slice(None)] * sorted.ndim
+    indices = [int(sorted.shape[axis]/4), int(sorted.shape[axis]*.75)]
+    result = []
+    for index in indices:
+        if sorted.shape[axis] % 2 == 1:
+            # index with slice to allow mean (below) to work
+            indexer[axis] = slice(index, index+1)
+        else:
+            indexer[axis] = slice(index-1, index+1)
+        # special cases for small arrays
+        if sorted.shape[axis] == 2:
+            # index with slice to allow mean (below) to work
+            indexer[axis] = slice(index, index+1)
+        # Use mean in odd and even case to coerce data type
+        # and check, use out array.
+        result.append(na.mean(sorted[indexer], axis=axis, out=out))
+    return na.array(result)
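
A quick illustration of the index logic in quartiles() above (a sketch only;
na is numpy and mu stands for yt.utilities.math_utils, as in the docstring
examples): for a 1-D array of length N the function averages the elements at
indices N/4 - 1 and N/4 (and 3N/4 - 1 and 3N/4) when N is even, and takes the
single elements at indices N/4 and 3N/4 when N is odd.

    import numpy as na
    from yt.utilities import math_utils as mu

    even = na.arange(8.0)      # N = 8, even
    odd  = na.arange(9.0)      # N = 9, odd

    # Even N: mean of even[1:3] and mean of even[5:7]
    print mu.quartiles(even)   # -> [ 1.5  5.5]

    # Odd N: elements at indices 8/4 = 2 and int(9*0.75) = 6, taken directly
    print mu.quartiles(odd)    # -> [ 2.  6.]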


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/utilities/minimal_representation.py
--- /dev/null
+++ b/yt/utilities/minimal_representation.py
@@ -0,0 +1,106 @@
+"""
+Skeleton objects that represent a few fundamental yt data types.
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import abc
+
+class ContainerClass(object):
+    pass
+
+class MinimalRepresentation(object):
+    __metaclass__ = abc.ABCMeta
+
+    def _update_attrs(self, obj, attr_list):
+        for attr in attr_list:
+            setattr(self, attr, getattr(obj, attr, None))
+        if hasattr(obj, "pf"):
+            self.output_hash = obj.pf._hash()
+
+    def __init__(self, obj):
+        self._update_attrs(obj, self._attr_list)
+
+    @abc.abstractmethod
+    def _generate_post(self):
+        pass
+
+    @abc.abstractproperty
+    def _attr_list(self):
+        pass
+
+    def _return_filtered_object(self, attrs):
+        new_attrs = tuple(attr for attr in self._attr_list
+                          if attr not in attrs)
+        new_class = type('Filtered%s' % self.__class__.__name__,
+                         (FilteredRepresentation,),
+                         {'_attr_list': new_attrs})
+        return new_class(self)
+
+    @property
+    def _attrs(self):
+        return dict( ((attr, getattr(self, attr)) for attr in self._attr_list) )
+
+    @classmethod
+    def _from_metadata(cls, metadata):
+        cc = ContainerClass()
+        for a, v in metadata.items():
+            setattr(cc, a, v)
+        return cls(cc)
+
+class FilteredRepresentation(MinimalRepresentation):
+    def _generate_post(self):
+        raise RuntimeError
+
+class MinimalStaticOutput(MinimalRepresentation):
+    _attr_list = ("dimensionality", "refine_by", "domain_dimensions",
+                  "current_time", "domain_left_edge", "domain_right_edge",
+                  "unique_identifier", "current_redshift", "output_hash",
+                  "cosmological_simulation", "omega_matter", "omega_lambda",
+                  "hubble_constant", "name")
+
+    def __init__(self, obj):
+        super(MinimalStaticOutput, self).__init__(obj)
+        self.output_hash = obj._hash()
+        self.name = str(obj)
+
+    def _generate_post(self):
+        metadata = self._attrs
+        chunks = []
+        return metadata, chunks
+
+class MinimalMappableData(MinimalRepresentation):
+
+    weight = "None"
+    _attr_list = ("field_data", "field", "weight", "axis", "output_hash")
+
+    def _generate_post(self):
+        nobj = self._return_filtered_object(("field_data",))
+        metadata = nobj._attrs
+        chunks = [(arr, self.field_data[arr]) for arr in self.field_data]
+        return (metadata, chunks)
+
+class MinimalProjectionData(MinimalMappableData):
+
+    def __init__(self, obj):
+        super(MinimalProjectionData, self).__init__(obj)
+        self.type = "proj"


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -39,49 +39,23 @@
 from yt.utilities.amr_utils import \
     QuadTree, merge_quadtrees
 
-exe_name = os.path.basename(sys.executable)
-# At import time, we determined whether or not we're being run in parallel.
-if exe_name in \
-        ["mpi4py", "embed_enzo",
-         "python"+sys.version[:3]+"-mpi"] \
-    or "--parallel" in sys.argv or '_parallel' in dir(sys) \
-    or any(["ipengine" in arg for arg in sys.argv]):
+parallel_capable = ytcfg.getboolean("yt", "__parallel")
+
+# Set up translation table and import things
+if parallel_capable:
     from mpi4py import MPI
-    parallel_capable = (MPI.COMM_WORLD.size > 1)
-    if parallel_capable:
-        mylog.info("Global parallel computation enabled: %s / %s",
-                   MPI.COMM_WORLD.rank, MPI.COMM_WORLD.size)
-        ytcfg["yt","__global_parallel_rank"] = str(MPI.COMM_WORLD.rank)
-        ytcfg["yt","__global_parallel_size"] = str(MPI.COMM_WORLD.size)
-        ytcfg["yt","__parallel"] = "True"
-        if exe_name == "embed_enzo" or \
-            ("_parallel" in dir(sys) and sys._parallel == True):
-            ytcfg["yt","inline"] = "True"
-        # I believe we do not need to turn this off manually
-        #ytcfg["yt","StoreParameterFiles"] = "False"
-        # Now let's make sure we have the right options set.
-        if MPI.COMM_WORLD.rank > 0:
-            if ytcfg.getboolean("yt","LogFile"):
-                ytcfg["yt","LogFile"] = "False"
-                yt.utilities.logger.disable_file_logging()
-        yt.utilities.logger.uncolorize_logging()
-        # Even though the uncolorize function already resets the format string,
-        # we reset it again so that it includes the processor.
-        f = logging.Formatter("P%03i %s" % (MPI.COMM_WORLD.rank,
-                                            yt.utilities.logger.ufstring))
-        if len(yt.utilities.logger.rootLogger.handlers) > 0:
-            yt.utilities.logger.rootLogger.handlers[0].setFormatter(f)
-        if ytcfg.getboolean("yt", "parallel_traceback"):
-            sys.excepthook = traceback_writer_hook("_%03i" % MPI.COMM_WORLD.rank)
+    yt.utilities.logger.uncolorize_logging()
+    # Even though the uncolorize function already resets the format string,
+    # we reset it again so that it includes the processor.
+    f = logging.Formatter("P%03i %s" % (MPI.COMM_WORLD.rank,
+                                        yt.utilities.logger.ufstring))
+    if len(yt.utilities.logger.rootLogger.handlers) > 0:
+        yt.utilities.logger.rootLogger.handlers[0].setFormatter(f)
+    if ytcfg.getboolean("yt", "parallel_traceback"):
+        sys.excepthook = traceback_writer_hook("_%03i" % MPI.COMM_WORLD.rank)
     if ytcfg.getint("yt","LogLevel") < 20:
         yt.utilities.logger.ytLogger.warning(
           "Log Level is set low -- this could affect parallel performance!")
-
-else:
-    parallel_capable = False
-
-# Set up translation table
-if parallel_capable:
     dtype_names = dict(
             float32 = MPI.FLOAT,
             float64 = MPI.DOUBLE,
@@ -374,7 +348,8 @@
             to_share[rstore.result_id] = rstore.result
         else:
             yield obj
-    communication_system.communicators.pop()
+    if parallel_capable:
+        communication_system.communicators.pop()
     if storage is not None:
         # Now we have to broadcast it
         new_storage = my_communicator.par_combine_object(


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/utilities/pasteboard.py
--- a/yt/utilities/pasteboard.py
+++ /dev/null
@@ -1,166 +0,0 @@
-"""
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: Columbia University
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2011 Matthew Turk.  All Rights Reserved.
-
-  This file is part of yt.
-
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
-
-from mercurial import ui, repo, commands, hg
-import json
-import os
-import time
-import uuid
-import urllib
-
-from yt.config import ytcfg
-
-def _get_last_mod(filectx):
-    rev = filectx.filectx(filectx.filerev())
-    return rev
-
-class PostInventory(object):
-    def __init__(self, uu = None, repo_fn = None):
-        if uu is None: uu = ui.ui()
-        if repo_fn is None: repo_fn = ytcfg.get("yt","pasteboard_repo")
-        if repo_fn == '':
-            raise KeyError("~/.yt/config:[yt]pasteboard_repo")
-        self.repo_fn = repo_fn
-        self.bbrepo = hg.repository(uu, repo_fn)
-        config_fn = os.path.join(repo_fn, ".hg", "hgrc")
-        uu.readconfig(config_fn)
-        commands.pull(uu, self.bbrepo)
-        commands.update(uu, self.bbrepo, clean=True)
-        if not os.path.exists(os.path.join(repo_fn, "posts")):
-            os.makedirs(os.path.join(repo_fn, "posts"))
-        if not os.path.exists(os.path.join(repo_fn, "html")):
-            os.makedirs(os.path.join(repo_fn, "html"))
-        self.uu = uu
-
-    def regenerate_posts(self):
-        self.posts = []
-        for file in self.bbrepo["tip"]:
-            if file.startswith("posts/") and file.count("/") == 1 \
-               and not file.endswith(".desc"):
-                filectx = self.bbrepo["tip"][file]
-                last_mod = _get_last_mod(filectx).date()
-                self.posts.append((last_mod[0] + last_mod[1], file))
-        self.posts.sort()
-        self.posts = self.posts[::-1]
-
-    def add_post(self, filename, desc = None,
-                 uu = None, highlight = True, push = True):
-        # We assume the post filename exists in the current space
-        self.regenerate_posts()
-        if uu is None: uu = self.uu
-        prefix = uuid.uuid4()
-        name = "%s-%s" % (prefix, os.path.basename(filename))
-        name_noext = name.replace(".","-")
-        hfn = "html/%s.html" % (name_noext)
-        pfn = "posts/%s" % (name)
-        abs_pfn = os.path.join(self.repo_fn, pfn)
-        abs_hfn = os.path.join(self.repo_fn, hfn)
-        if desc is not None:
-            open(abs_pfn + ".desc", "w").write(desc)
-        self.posts.insert(0, (int(time.time()), "posts/%s" % name))
-        if not os.path.exists(abs_pfn):
-            open(abs_pfn,"w").write(open(filename).read())
-        inv_fname = self.update_inventory()
-        if highlight and not name.endswith(".html"):
-            from pygments.cmdline import main as pygmain
-            rv = pygmain(["pygmentize", "-o", abs_hfn,
-                          "-O", "full", abs_pfn])
-        if not highlight or rv:
-            content = open(abs_pfn).read()
-            open(abs_hfn, "w").write(
-                "<HTML><BODY><PRE>" + content + "</PRE></BODY></HTML>")
-        to_manage = [abs_pfn, abs_hfn]
-        if desc is not None: to_manage.append(abs_pfn + ".desc")
-        commands.add(uu, self.bbrepo, *to_manage)
-        commands.commit(uu, self.bbrepo, *(to_manage + [inv_fname]),
-                        message="Adding %s" % name)
-        if push: commands.push(uu, self.bbrepo)
-
-    def update_inventory(self):
-        tip = self.bbrepo["tip"]
-        vals = []
-        for t, pfn in self.posts:
-            dfn = pfn + ".desc"
-            if dfn in tip:
-                d = tip[dfn].data()
-                last_mod =_get_last_mod(tip[dfn])
-                last_hash = last_mod.hex()
-                uname = last_mod.user()
-            elif pfn not in tip:
-                abs_pfn = os.path.join(self.repo_fn, pfn)
-                uname = self.uu.config("ui","username")
-                if os.path.exists(abs_pfn + ".desc"):
-                    d = open(abs_pfn + ".desc").read()
-                else:
-                    d = open(abs_pfn).read()
-                last_hash = "tip"
-            else:
-                d = tip[pfn].data()
-                last_mod = _get_last_mod(tip[pfn])
-                last_hash = last_mod.hex()
-                uname = last_mod.user()
-            if len(d) > 80: d = d[:77] + "..."
-            name_noext = pfn[6:].replace(".","-")
-            vals.append(dict(modified = time.ctime(t),
-                             modtime = t,
-                             lastmod_hash = last_hash,
-                             fullname = pfn,
-                             htmlname = "html/%s.html" % name_noext,
-                             name = pfn[43:], # 6 for posts/ then 36 for UUID
-                             username = uname,
-                             descr = d)) 
-        fn = os.path.join(self.repo_fn, "inventory.json")
-        f = open(fn, "w")
-        f.write("var inventory_data = ")
-        json.dump(vals, f, indent = 1)
-        f.write(";")
-        return fn
-
-def retrieve_pastefile(username, paste_id, output_fn = None):
-    # First we get the username's inventory.json
-    s = urllib.urlopen("http://%s.bitbucket.org/inventory.json" % (username))
-    data = s.read()
-    # This is an ugly, ugly hack for my lack of understanding of how best to
-    # handle this JSON stuff.
-    data = data[data.find("=")+1:data.rfind(";")] 
-    #import pdb;pdb.set_trace()
-    inv = json.loads(data)
-    k = None
-    if len(paste_id) == 36:
-        # Then this is a UUID
-        for k in inv:
-            if k['fullname'][6:42] == paste_id: break
-    elif len(paste_id) == 10:
-        pp = int(paste_id)
-        for k in inv:
-            if k['modtime'] == pp: break
-    if k is None: raise KeyError(k)
-    # k is our key
-    url = "http://%s.bitbucket.org/%s" % (username, k['fullname'])
-    s = urllib.urlopen(url)
-    data = s.read()
-    if output_fn is not None:
-        if os.path.exists(output_fn): raise IOError(output_fn)
-        open(output_fn, "w").write(data)
-    else:
-        print data


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/utilities/pexpect.py
--- a/yt/utilities/pexpect.py
+++ /dev/null
@@ -1,1845 +0,0 @@
-"""Pexpect is a Python module for spawning child applications and controlling
-them automatically. Pexpect can be used for automating interactive applications
-such as ssh, ftp, passwd, telnet, etc. It can be used to a automate setup
-scripts for duplicating software package installations on different servers. It
-can be used for automated software testing. Pexpect is in the spirit of Don
-Libes' Expect, but Pexpect is pure Python. Other Expect-like modules for Python
-require TCL and Expect or require C extensions to be compiled. Pexpect does not
-use C, Expect, or TCL extensions. It should work on any platform that supports
-the standard Python pty module. The Pexpect interface focuses on ease of use so
-that simple tasks are easy.
-
-There are two main interfaces to Pexpect -- the function, run() and the class,
-spawn. You can call the run() function to execute a command and return the
-output. This is a handy replacement for os.system().
-
-For example::
-
-    pexpect.run('ls -la')
-
-The more powerful interface is the spawn class. You can use this to spawn an
-external child command and then interact with the child by sending lines and
-expecting responses.
-
-For example::
-
-    child = pexpect.spawn('scp foo myname at host.example.com:.')
-    child.expect ('Password:')
-    child.sendline (mypassword)
-
-This works even for commands that ask for passwords or other input outside of
-the normal stdio streams.
-
-Credits: Noah Spurrier, Richard Holden, Marco Molteni, Kimberley Burchett,
-Robert Stone, Hartmut Goebel, Chad Schroeder, Erick Tryzelaar, Dave Kirby, Ids
-vander Molen, George Todd, Noel Taylor, Nicolas D. Cesar, Alexander Gattin,
-Geoffrey Marshall, Francisco Lourenco, Glen Mabey, Karthik Gurusamy, Fernando
-Perez, Corey Minyard, Jon Cohen, Guillaume Chazarain, Andrew Ryan, Nick
-Craig-Wood, Andrew Stone, Jorgen Grahn (Let me know if I forgot anyone.)
-
-Free, open source, and all that good stuff.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
-so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-Pexpect Copyright (c) 2008 Noah Spurrier
-http://pexpect.sourceforge.net/
-
-$Id: pexpect.py 507 2007-12-27 02:40:52Z noah $
-"""
-
-try:
-    import os, sys, time
-    import select
-    import string
-    import re
-    import struct
-    import resource
-    import types
-    import pty
-    import tty
-    import termios
-    import fcntl
-    import errno
-    import traceback
-    import signal
-except ImportError, e:
-    raise ImportError (str(e) + """
-
-A critical module was not found. Probably this operating system does not
-support it. Pexpect is intended for UNIX-like operating systems.""")
-
-__version__ = '2.3'
-__revision__ = '$Revision: 399 $'
-__all__ = ['ExceptionPexpect', 'EOF', 'TIMEOUT', 'spawn', 'run', 'which',
-    'split_command_line', '__version__', '__revision__']
-
-# Exception classes used by this module.
-class ExceptionPexpect(Exception):
-
-    """Base class for all exceptions raised by this module.
-    """
-
-    def __init__(self, value):
-
-        self.value = value
-
-    def __str__(self):
-
-        return str(self.value)
-
-    def get_trace(self):
-
-        """This returns an abbreviated stack trace with lines that only concern
-        the caller. In other words, the stack trace inside the Pexpect module
-        is not included. """
-
-        tblist = traceback.extract_tb(sys.exc_info()[2])
-        #tblist = filter(self.__filter_not_pexpect, tblist)
-        tblist = [item for item in tblist if self.__filter_not_pexpect(item)]
-        tblist = traceback.format_list(tblist)
-        return ''.join(tblist)
-
-    def __filter_not_pexpect(self, trace_list_item):
-
-        """This returns True if list item 0 the string 'pexpect.py' in it. """
-
-        if trace_list_item[0].find('pexpect.py') == -1:
-            return True
-        else:
-            return False
-
-class EOF(ExceptionPexpect):
-
-    """Raised when EOF is read from a child. This usually means the child has exited."""
-
-class TIMEOUT(ExceptionPexpect):
-
-    """Raised when a read time exceeds the timeout. """
-
-##class TIMEOUT_PATTERN(TIMEOUT):
-##    """Raised when the pattern match time exceeds the timeout.
-##    This is different than a read TIMEOUT because the child process may
-##    give output, thus never give a TIMEOUT, but the output
-##    may never match a pattern.
-##    """
-##class MAXBUFFER(ExceptionPexpect):
-##    """Raised when a scan buffer fills before matching an expected pattern."""
-
-def run (command, timeout=-1, withexitstatus=False, events=None, extra_args=None, logfile=None, cwd=None, env=None):
-
-    """
-    This function runs the given command; waits for it to finish; then
-    returns all output as a string. STDERR is included in output. If the full
-    path to the command is not given then the path is searched.
-
-    Note that lines are terminated by CR/LF (\\r\\n) combination even on
-    UNIX-like systems because this is the standard for pseudo ttys. If you set
-    'withexitstatus' to true, then run will return a tuple of (command_output,
-    exitstatus). If 'withexitstatus' is false then this returns just
-    command_output.
-
-    The run() function can often be used instead of creating a spawn instance.
-    For example, the following code uses spawn::
-
-        from pexpect import *
-        child = spawn('scp foo myname at host.example.com:.')
-        child.expect ('(?i)password')
-        child.sendline (mypassword)
-
-    The previous code can be replace with the following::
-
-        from pexpect import *
-        run ('scp foo myname at host.example.com:.', events={'(?i)password': mypassword})
-
-    Examples
-    ========
-
-    Start the apache daemon on the local machine::
-
-        from pexpect import *
-        run ("/usr/local/apache/bin/apachectl start")
-
-    Check in a file using SVN::
-
-        from pexpect import *
-        run ("svn ci -m 'automatic commit' my_file.py")
-
-    Run a command and capture exit status::
-
-        from pexpect import *
-        (command_output, exitstatus) = run ('ls -l /bin', withexitstatus=1)
-
-    Tricky Examples
-    ===============
-
-    The following will run SSH and execute 'ls -l' on the remote machine. The
-    password 'secret' will be sent if the '(?i)password' pattern is ever seen::
-
-        run ("ssh username at machine.example.com 'ls -l'", events={'(?i)password':'secret\\n'})
-
-    This will start mencoder to rip a video from DVD. This will also display
-    progress ticks every 5 seconds as it runs. For example::
-
-        from pexpect import *
-        def print_ticks(d):
-            print d['event_count'],
-        run ("mencoder dvd://1 -o video.avi -oac copy -ovc copy", events={TIMEOUT:print_ticks}, timeout=5)
-
-    The 'events' argument should be a dictionary of patterns and responses.
-    Whenever one of the patterns is seen in the command out run() will send the
-    associated response string. Note that you should put newlines in your
-    string if Enter is necessary. The responses may also contain callback
-    functions. Any callback is function that takes a dictionary as an argument.
-    The dictionary contains all the locals from the run() function, so you can
-    access the child spawn object or any other variable defined in run()
-    (event_count, child, and extra_args are the most useful). A callback may
-    return True to stop the current run process otherwise run() continues until
-    the next event. A callback may also return a string which will be sent to
-    the child. 'extra_args' is not used by directly run(). It provides a way to
-    pass data to a callback function through run() through the locals
-    dictionary passed to a callback. """
-
-    if timeout == -1:
-        child = spawn(command, maxread=2000, logfile=logfile, cwd=cwd, env=env)
-    else:
-        child = spawn(command, timeout=timeout, maxread=2000, logfile=logfile, cwd=cwd, env=env)
-    if events is not None:
-        patterns = events.keys()
-        responses = events.values()
-    else:
-        patterns=None # We assume that EOF or TIMEOUT will save us.
-        responses=None
-    child_result_list = []
-    event_count = 0
-    while 1:
-        try:
-            index = child.expect (patterns)
-            if type(child.after) in types.StringTypes:
-                child_result_list.append(child.before + child.after)
-            else: # child.after may have been a TIMEOUT or EOF, so don't cat those.
-                child_result_list.append(child.before)
-            if type(responses[index]) in types.StringTypes:
-                child.send(responses[index])
-            elif type(responses[index]) is types.FunctionType:
-                callback_result = responses[index](locals())
-                sys.stdout.flush()
-                if type(callback_result) in types.StringTypes:
-                    child.send(callback_result)
-                elif callback_result:
-                    break
-            else:
-                raise TypeError ('The callback must be a string or function type.')
-            event_count = event_count + 1
-        except TIMEOUT, e:
-            child_result_list.append(child.before)
-            break
-        except EOF, e:
-            child_result_list.append(child.before)
-            break
-    child_result = ''.join(child_result_list)
-    if withexitstatus:
-        child.close()
-        return (child_result, child.exitstatus)
-    else:
-        return child_result
-
-class spawn (object):
-
-    """This is the main class interface for Pexpect. Use this class to start
-    and control child applications. """
-
-    def __init__(self, command, args=[], timeout=30, maxread=2000, searchwindowsize=None, logfile=None, cwd=None, env=None):
-
-        """This is the constructor. The command parameter may be a string that
-        includes a command and any arguments to the command. For example::
-
-            child = pexpect.spawn ('/usr/bin/ftp')
-            child = pexpect.spawn ('/usr/bin/ssh user at example.com')
-            child = pexpect.spawn ('ls -latr /tmp')
-
-        You may also construct it with a list of arguments like so::
-
-            child = pexpect.spawn ('/usr/bin/ftp', [])
-            child = pexpect.spawn ('/usr/bin/ssh', ['user at example.com'])
-            child = pexpect.spawn ('ls', ['-latr', '/tmp'])
-
-        After this the child application will be created and will be ready to
-        talk to. For normal use, see expect() and send() and sendline().
-
-        Remember that Pexpect does NOT interpret shell meta characters such as
-        redirect, pipe, or wild cards (>, |, or *). This is a common mistake.
-        If you want to run a command and pipe it through another command then
-        you must also start a shell. For example::
-
-            child = pexpect.spawn('/bin/bash -c "ls -l | grep LOG > log_list.txt"')
-            child.expect(pexpect.EOF)
-
-        The second form of spawn (where you pass a list of arguments) is useful
-        in situations where you wish to spawn a command and pass it its own
-        argument list. This can make syntax more clear. For example, the
-        following is equivalent to the previous example::
-
-            shell_cmd = 'ls -l | grep LOG > log_list.txt'
-            child = pexpect.spawn('/bin/bash', ['-c', shell_cmd])
-            child.expect(pexpect.EOF)
-
-        The maxread attribute sets the read buffer size. This is maximum number
-        of bytes that Pexpect will try to read from a TTY at one time. Setting
-        the maxread size to 1 will turn off buffering. Setting the maxread
-        value higher may help performance in cases where large amounts of
-        output are read back from the child. This feature is useful in
-        conjunction with searchwindowsize.
-
-        The searchwindowsize attribute sets the how far back in the incomming
-        seach buffer Pexpect will search for pattern matches. Every time
-        Pexpect reads some data from the child it will append the data to the
-        incomming buffer. The default is to search from the beginning of the
-        imcomming buffer each time new data is read from the child. But this is
-        very inefficient if you are running a command that generates a large
-        amount of data where you want to match The searchwindowsize does not
-        effect the size of the incomming data buffer. You will still have
-        access to the full buffer after expect() returns.
-
-        The logfile member turns on or off logging. All input and output will
-        be copied to the given file object. Set logfile to None to stop
-        logging. This is the default. Set logfile to sys.stdout to echo
-        everything to standard output. The logfile is flushed after each write.
-
-        Example log input and output to a file::
-
-            child = pexpect.spawn('some_command')
-            fout = file('mylog.txt','w')
-            child.logfile = fout
-
-        Example log to stdout::
-
-            child = pexpect.spawn('some_command')
-            child.logfile = sys.stdout
-
-        The logfile_read and logfile_send members can be used to separately log
-        the input from the child and output sent to the child. Sometimes you
-        don't want to see everything you write to the child. You only want to
-        log what the child sends back. For example::
-        
-            child = pexpect.spawn('some_command')
-            child.logfile_read = sys.stdout
-
-        To separately log output sent to the child use logfile_send::
-        
-            self.logfile_send = fout
-
-        The delaybeforesend helps overcome a weird behavior that many users
-        were experiencing. The typical problem was that a user would expect() a
-        "Password:" prompt and then immediately call sendline() to send the
-        password. The user would then see that their password was echoed back
-        to them. Passwords don't normally echo. The problem is caused by the
-        fact that most applications print out the "Password" prompt and then
-        turn off stdin echo, but if you send your password before the
-        application has turned off echo, then your password will be echoed.
-        Normally this wouldn't be a problem when interacting with a human at a
-        real keyboard. If you introduce a slight delay just before writing then
-        this seems to clear up the problem. This was such a common problem for
-        many users that I decided that the default pexpect behavior should be
-        to sleep just before writing to the child application. 1/20th of a
-        second (50 ms) seems to be enough to clear up the problem. You can set
-        delaybeforesend to 0 to return to the old behavior. Most Linux machines
-        don't like this to be below 0.03. I don't know why.
-
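-        For example, a minimal illustrative sketch of disabling the delay (the
-        command shown is only an example)::
-
-            child = pexpect.spawn('ssh user@example.com')
-            child.delaybeforesend = 0  # revert to the old, no-delay behavior
-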
-        Note that spawn is clever about finding commands on your path.
-        It uses the same logic that "which" uses to find executables.
-
-        If you wish to get the exit status of the child you must call the
-        close() method. The exit or signal status of the child will be stored
-        in self.exitstatus or self.signalstatus. If the child exited normally
-        then exitstatus will store the exit return code and signalstatus will
-        be None. If the child was terminated abnormally with a signal then
-        signalstatus will store the signal value and exitstatus will be None.
-        If you need more detail you can also read the self.status member which
-        stores the status returned by os.waitpid. You can interpret this using
-        os.WIFEXITED/os.WEXITSTATUS or os.WIFSIGNALED/os.WTERMSIG.
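-
-        For example, a minimal illustrative sketch of reading the exit status
-        after the child finishes (the command shown is only an example)::
-
-            child = pexpect.spawn('/bin/ls')
-            child.expect(pexpect.EOF)
-            child.close()
-            print child.exitstatus, child.signalstatus
-        """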
-
-        self.STDIN_FILENO = pty.STDIN_FILENO
-        self.STDOUT_FILENO = pty.STDOUT_FILENO
-        self.STDERR_FILENO = pty.STDERR_FILENO
-        self.stdin = sys.stdin
-        self.stdout = sys.stdout
-        self.stderr = sys.stderr
-
-        self.searcher = None
-        self.ignorecase = False
-        self.before = None
-        self.after = None
-        self.match = None
-        self.match_index = None
-        self.terminated = True
-        self.exitstatus = None
-        self.signalstatus = None
-        self.status = None # status returned by os.waitpid
-        self.flag_eof = False
-        self.pid = None
-        self.child_fd = -1 # initially closed
-        self.timeout = timeout
-        self.delimiter = EOF
-        self.logfile = logfile
-        self.logfile_read = None # input from child (read_nonblocking)
-        self.logfile_send = None # output to send (send, sendline)
-        self.maxread = maxread # max bytes to read at one time into buffer
-        self.buffer = '' # This is the read buffer. See maxread.
-        self.searchwindowsize = searchwindowsize # Anything before searchwindowsize point is preserved, but not searched.
-        # Most Linux machines don't like delaybeforesend to be below 0.03 (30 ms).
-        self.delaybeforesend = 0.05 # Sets sleep time used just before sending data to child. Time in seconds.
-        self.delayafterclose = 0.1 # Sets delay in close() method to allow kernel time to update process status. Time in seconds.
-        self.delayafterterminate = 0.1 # Sets delay in terminate() method to allow kernel time to update process status. Time in seconds.
-        self.softspace = False # File-like object.
-        self.name = '<' + repr(self) + '>' # File-like object.
-        self.encoding = None # File-like object.
-        self.closed = True # File-like object.
-        self.cwd = cwd
-        self.env = env
-        self.__irix_hack = (sys.platform.lower().find('irix')>=0) # This flags if we are running on irix
-        # Solaris uses internal __fork_pty(). All others use pty.fork().
-        if (sys.platform.lower().find('solaris')>=0) or (sys.platform.lower().find('sunos5')>=0):
-            self.use_native_pty_fork = False
-        else:
-            self.use_native_pty_fork = True
-
-
-        # allow dummy instances for subclasses that may not use command or args.
-        if command is None:
-            self.command = None
-            self.args = None
-            self.name = '<pexpect factory incomplete>'
-        else:
-            self._spawn (command, args)
-
-    def __del__(self):
-
-        """This makes sure that no system resources are left open. Python only
-        garbage collects Python objects. OS file descriptors are not Python
-        objects, so they must be handled explicitly. If the child file
-        descriptor was opened outside of this class (passed to the constructor)
-        then this does not close it. """
-
-        if not self.closed:
-            # It is possible for __del__ methods to execute during the
-            # teardown of the Python VM itself. Thus self.close() may
-            # trigger an exception because os.close may be None.
-            # -- Fernando Perez
-            try:
-                self.close()
-            except AttributeError:
-                pass
-
-    def __str__(self):
-
-        """This returns a human-readable string that represents the state of
-        the object. """
-
-        s = []
-        s.append(repr(self))
-        s.append('version: ' + __version__ + ' (' + __revision__ + ')')
-        s.append('command: ' + str(self.command))
-        s.append('args: ' + str(self.args))
-        s.append('searcher: ' + str(self.searcher))
-        s.append('buffer (last 100 chars): ' + str(self.buffer)[-100:])
-        s.append('before (last 100 chars): ' + str(self.before)[-100:])
-        s.append('after: ' + str(self.after))
-        s.append('match: ' + str(self.match))
-        s.append('match_index: ' + str(self.match_index))
-        s.append('exitstatus: ' + str(self.exitstatus))
-        s.append('flag_eof: ' + str(self.flag_eof))
-        s.append('pid: ' + str(self.pid))
-        s.append('child_fd: ' + str(self.child_fd))
-        s.append('closed: ' + str(self.closed))
-        s.append('timeout: ' + str(self.timeout))
-        s.append('delimiter: ' + str(self.delimiter))
-        s.append('logfile: ' + str(self.logfile))
-        s.append('logfile_read: ' + str(self.logfile_read))
-        s.append('logfile_send: ' + str(self.logfile_send))
-        s.append('maxread: ' + str(self.maxread))
-        s.append('ignorecase: ' + str(self.ignorecase))
-        s.append('searchwindowsize: ' + str(self.searchwindowsize))
-        s.append('delaybeforesend: ' + str(self.delaybeforesend))
-        s.append('delayafterclose: ' + str(self.delayafterclose))
-        s.append('delayafterterminate: ' + str(self.delayafterterminate))
-        return '\n'.join(s)
-
-    def _spawn(self,command,args=[]):
-
-        """This starts the given command in a child process. This does all the
-        fork/exec type of stuff for a pty. This is called by __init__. If args
-        is empty then command will be parsed (split on spaces) and args will be
-        set to parsed arguments. """
-
-        # The pid and child_fd of this object get set by this method.
-        # Note that it is difficult for this method to fail.
-        # You cannot detect if the child process cannot start.
-        # So the only way you can tell if the child process started
-        # or not is to try to read from the file descriptor. If you get
-        # EOF immediately then it means that the child is already dead.
-        # That may not necessarily be bad because you may have spawned a child
-        # that performs some task; creates no stdout output; and then dies.
-
-        # If command is an int type then it may represent a file descriptor.
-        if type(command) == type(0):
-            raise ExceptionPexpect ('Command is an int type. If this is a file descriptor then maybe you want to use fdpexpect.fdspawn which takes an existing file descriptor instead of a command string.')
-
-        if type (args) != type([]):
-            raise TypeError ('The argument, args, must be a list.')
-
-        if args == []:
-            self.args = split_command_line(command)
-            self.command = self.args[0]
-        else:
-            self.args = args[:] # work with a copy
-            self.args.insert (0, command)
-            self.command = command
-
-        command_with_path = which(self.command)
-        if command_with_path is None:
-            raise ExceptionPexpect ('The command was not found or was not executable: %s.' % self.command)
-        self.command = command_with_path
-        self.args[0] = self.command
-
-        self.name = '<' + ' '.join (self.args) + '>'
-
-        assert self.pid is None, 'The pid member should be None.'
-        assert self.command is not None, 'The command member should not be None.'
-
-        if self.use_native_pty_fork:
-            try:
-                self.pid, self.child_fd = pty.fork()
-            except OSError, e:
-                raise ExceptionPexpect('Error! pty.fork() failed: ' + str(e))
-        else: # Use internal __fork_pty
-            self.pid, self.child_fd = self.__fork_pty()
-
-        if self.pid == 0: # Child
-            try:
-                self.child_fd = sys.stdout.fileno() # used by setwinsize()
-                self.setwinsize(24, 80)
-            except:
-                # Some platforms do not like setwinsize (Cygwin).
-                # This will cause problem when running applications that
-                # are very picky about window size.
-                # This is a serious limitation, but not a show stopper.
-                pass
-            # Do not allow child to inherit open file descriptors from parent.
-            max_fd = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
-            for i in range (3, max_fd):
-                try:
-                    os.close (i)
-                except OSError:
-                    pass
-
-            # I don't know why this works, but ignoring SIGHUP fixes a
-            # problem when trying to start a Java daemon with sudo
-            # (specifically, Tomcat).
-            signal.signal(signal.SIGHUP, signal.SIG_IGN)
-
-            if self.cwd is not None:
-                os.chdir(self.cwd)
-            if self.env is None:
-                os.execv(self.command, self.args)
-            else:
-                os.execvpe(self.command, self.args, self.env)
-
-        # Parent
-        self.terminated = False
-        self.closed = False
-
-    def __fork_pty(self):
-
-        """This implements a substitute for the forkpty system call. This
-        should be more portable than the pty.fork() function. Specifically,
-        this should work on Solaris.
-
-        Modified 10.06.05 by Geoff Marshall: Implemented __fork_pty() method to
-        resolve the issue with Python's pty.fork() not supporting Solaris,
-        particularly ssh. Based on patch to posixmodule.c authored by Noah
-        Spurrier::
-
-            http://mail.python.org/pipermail/python-dev/2003-May/035281.html
-
-        """
-
-        parent_fd, child_fd = os.openpty()
-        if parent_fd < 0 or child_fd < 0:
-            raise ExceptionPexpect, "Error! Could not open pty with os.openpty()."
-
-        pid = os.fork()
-        if pid < 0:
-            raise ExceptionPexpect, "Error! Failed os.fork()."
-        elif pid == 0:
-            # Child.
-            os.close(parent_fd)
-            self.__pty_make_controlling_tty(child_fd)
-
-            os.dup2(child_fd, 0)
-            os.dup2(child_fd, 1)
-            os.dup2(child_fd, 2)
-
-            if child_fd > 2:
-                os.close(child_fd)
-        else:
-            # Parent.
-            os.close(child_fd)
-
-        return pid, parent_fd
-
-    def __pty_make_controlling_tty(self, tty_fd):
-
-        """This makes the pseudo-terminal the controlling tty. This should be
-        more portable than the pty.fork() function. Specifically, this should
-        work on Solaris. """
-
-        child_name = os.ttyname(tty_fd)
-
-        # Disconnect from controlling tty if still connected.
-        fd = os.open("/dev/tty", os.O_RDWR | os.O_NOCTTY);
-        if fd >= 0:
-            os.close(fd)
-
-        os.setsid()
-
-        # Verify we are disconnected from controlling tty
-        try:
-            fd = os.open("/dev/tty", os.O_RDWR | os.O_NOCTTY);
-            if fd >= 0:
-                os.close(fd)
-                raise ExceptionPexpect, "Error! We are not disconnected from a controlling tty."
-        except:
-            # Good! We are disconnected from a controlling tty.
-            pass
-
-        # Verify we can open child pty.
-        fd = os.open(child_name, os.O_RDWR);
-        if fd < 0:
-            raise ExceptionPexpect, "Error! Could not open child pty, " + child_name
-        else:
-            os.close(fd)
-
-        # Verify we now have a controlling tty.
-        fd = os.open("/dev/tty", os.O_WRONLY)
-        if fd < 0:
-            raise ExceptionPexpect, "Error! Could not open controlling tty, /dev/tty"
-        else:
-            os.close(fd)
-
-    def fileno (self):   # File-like object.
-
-        """This returns the file descriptor of the pty for the child.
-        """
-
-        return self.child_fd
-
-    def close (self, force=True):   # File-like object.
-
-        """This closes the connection with the child application. Note that
-        calling close() more than once is valid. This emulates standard Python
-        behavior with files. Set force to True if you want to make sure that
-        the child is terminated (SIGKILL is sent if the child ignores SIGHUP
-        and SIGINT). """
-
-        if not self.closed:
-            self.flush()
-            os.close (self.child_fd)
-            time.sleep(self.delayafterclose) # Give kernel time to update process status.
-            if self.isalive():
-                if not self.terminate(force):
-                    raise ExceptionPexpect ('close() could not terminate the child using terminate()')
-            self.child_fd = -1
-            self.closed = True
-            #self.pid = None
-
-    def flush (self):   # File-like object.
-
-        """This does nothing. It is here to support the interface for a
-        File-like object. """
-
-        pass
-
-    def isatty (self):   # File-like object.
-
-        """This returns True if the file descriptor is open and connected to a
-        tty(-like) device, else False. """
-
-        return os.isatty(self.child_fd)
-
-    def waitnoecho (self, timeout=-1):
-
-        """This waits until the terminal ECHO flag is set False. This returns
-        True if the echo mode is off. This returns False if the ECHO flag was
-        not set False before the timeout. This can be used to detect when the
-        child is waiting for a password. Usually a child application will turn
-        off echo mode when it is waiting for the user to enter a password. For
-        example, instead of expecting the "password:" prompt you can wait for
-        the child to set ECHO off::
-
-            p = pexpect.spawn ('ssh user@example.com')
-            p.waitnoecho()
-            p.sendline(mypassword)
-
-        If timeout is None then this method will block forever until the ECHO
-        flag is False.
-
-        """
-
-        if timeout == -1:
-            timeout = self.timeout
-        if timeout is not None:
-            end_time = time.time() + timeout 
-        while True:
-            if not self.getecho():
-                return True
-            if timeout < 0 and timeout is not None:
-                return False
-            if timeout is not None:
-                timeout = end_time - time.time()
-            time.sleep(0.1)
-
-    def getecho (self):
-
-        """This returns the terminal echo mode. This returns True if echo is
-        on or False if echo is off. Child applications that are expecting you
-        to enter a password often set ECHO False. See waitnoecho(). """
-
-        attr = termios.tcgetattr(self.child_fd)
-        if attr[3] & termios.ECHO:
-            return True
-        return False
-
-    def setecho (self, state):
-
-        """This sets the terminal echo mode on or off. Note that anything the
-        child sent before the echo will be lost, so you should be sure that
-        your input buffer is empty before you call setecho(). For example, the
-        following will work as expected::
-
-            p = pexpect.spawn('cat')
-            p.sendline ('1234') # We will see this twice (once from tty echo and again from cat).
-            p.expect (['1234'])
-            p.expect (['1234'])
-            p.setecho(False) # Turn off tty echo
-            p.sendline ('abcd') # We will see this only once (echoed by cat).
-            p.sendline ('wxyz') # We will see this only once (echoed by cat).
-            p.expect (['abcd'])
-            p.expect (['wxyz'])
-
-        The following WILL NOT WORK because the lines sent before the setecho
-        will be lost::
-
-            p = pexpect.spawn('cat')
-            p.sendline ('1234') # We will see this twice (once from tty echo and again from cat).
-            p.setecho(False) # Turn off tty echo
-            p.sendline ('abcd') # We will see this only once (echoed by cat).
-            p.sendline ('wxyz') # We will see this only once (echoed by cat).
-            p.expect (['1234'])
-            p.expect (['1234'])
-            p.expect (['abcd'])
-            p.expect (['wxyz'])
-        """
-
-        self.child_fd
-        attr = termios.tcgetattr(self.child_fd)
-        if state:
-            attr[3] = attr[3] | termios.ECHO
-        else:
-            attr[3] = attr[3] & ~termios.ECHO
-        # I tried TCSADRAIN and TCSAFLUSH, but these were inconsistent
-        # and blocked on some platforms. TCSADRAIN is probably ideal if it worked.
-        termios.tcsetattr(self.child_fd, termios.TCSANOW, attr)
-
-    def read_nonblocking (self, size = 1, timeout = -1):
-
-        """This reads at most size characters from the child application. It
-        includes a timeout. If the read does not complete within the timeout
-        period then a TIMEOUT exception is raised. If the end of file is read
-        then an EOF exception will be raised. If a logfile was set (see the
-        logfile member) then all data will also be written to the log file.
-
-        If timeout is None then the read may block indefinitely. If timeout is -1
-        then the self.timeout value is used. If timeout is 0 then the child is
-        polled and if there was no data immediately ready then this will raise
-        a TIMEOUT exception.
-
-        The timeout refers only to the amount of time to read at least one
-        character. This is not affected by the 'size' parameter, so if you call
-        read_nonblocking(size=100, timeout=30) and only one character is
-        available right away then one character will be returned immediately.
-        It will not wait for 30 seconds for another 99 characters to come in.
-
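-        For example, a minimal illustrative sketch (the command, size and
-        timeout values are only examples)::
-
-            child = pexpect.spawn('ping -c 3 localhost')
-            try:
-                chunk = child.read_nonblocking(size=1024, timeout=5)
-            except pexpect.TIMEOUT:
-                chunk = ''
-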
-        This is a wrapper around os.read(). It uses select.select() to
-        implement the timeout. """
-
-        if self.closed:
-            raise ValueError ('I/O operation on closed file in read_nonblocking().')
-
-        if timeout == -1:
-            timeout = self.timeout
-
-        # Note that some systems such as Solaris do not give an EOF when
-        # the child dies. In fact, you can still try to read
-        # from the child_fd -- it will block forever or until TIMEOUT.
-        # For this case, I test isalive() before doing any reading.
-        # If isalive() is false, then I pretend that this is the same as EOF.
-        if not self.isalive():
-            r,w,e = self.__select([self.child_fd], [], [], 0) # timeout of 0 means "poll"
-            if not r:
-                self.flag_eof = True
-                raise EOF ('End Of File (EOF) in read_nonblocking(). Braindead platform.')
-        elif self.__irix_hack:
-            # This is a hack for Irix. It seems that Irix requires a long delay before checking isalive.
-            # This adds a 2 second delay, but only when the child is terminated.
-            r, w, e = self.__select([self.child_fd], [], [], 2)
-            if not r and not self.isalive():
-                self.flag_eof = True
-                raise EOF ('End Of File (EOF) in read_nonblocking(). Pokey platform.')
-
-        r,w,e = self.__select([self.child_fd], [], [], timeout)
-
-        if not r:
-            if not self.isalive():
-                # Some platforms, such as Irix, will claim that their processes are alive;
-                # then timeout on the select; and then finally admit that they are not alive.
-                self.flag_eof = True
-                raise EOF ('End of File (EOF) in read_nonblocking(). Very pokey platform.')
-            else:
-                raise TIMEOUT ('Timeout exceeded in read_nonblocking().')
-
-        if self.child_fd in r:
-            try:
-                s = os.read(self.child_fd, size)
-            except OSError, e: # Linux does this
-                self.flag_eof = True
-                raise EOF ('End Of File (EOF) in read_nonblocking(). Exception style platform.')
-            if s == '': # BSD style
-                self.flag_eof = True
-                raise EOF ('End Of File (EOF) in read_nonblocking(). Empty string style platform.')
-
-            if self.logfile is not None:
-                self.logfile.write (s)
-                self.logfile.flush()
-            if self.logfile_read is not None:
-                self.logfile_read.write (s)
-                self.logfile_read.flush()
-
-            return s
-
-        raise ExceptionPexpect ('Reached an unexpected state in read_nonblocking().')
-
-    def read (self, size = -1):   # File-like object.
-
-        """This reads at most "size" bytes from the file (less if the read hits
-        EOF before obtaining size bytes). If the size argument is negative or
-        omitted, read all data until EOF is reached. The bytes are returned as
-        a string object. An empty string is returned when EOF is encountered
-        immediately. """
-
-        if size == 0:
-            return ''
-        if size < 0:
-            self.expect (self.delimiter) # delimiter default is EOF
-            return self.before
-
-        # I could have done this more directly by not using expect(), but
-        # I deliberately decided to couple read() to expect() so that
-        # I would catch any bugs early and ensure consistent behavior.
-        # It's a little less efficient, but there is less for me to
-        # worry about if I have to later modify read() or expect().
-        # Note, it's OK if size==-1 in the regex. That just means it
-        # will never match anything in which case we stop only on EOF.
-        cre = re.compile('.{%d}' % size, re.DOTALL)
-        index = self.expect ([cre, self.delimiter]) # delimiter default is EOF
-        if index == 0:
-            return self.after ### self.before should be ''. Should I assert this?
-        return self.before
-
-    def readline (self, size = -1):    # File-like object.
-
-        """This reads and returns one entire line. A trailing newline is kept
-        in the string, but may be absent when a file ends with an incomplete
-        line. Note: This readline() looks for a \\r\\n pair even on UNIX
-        because this is what the pseudo tty device returns. So contrary to what
-        you may expect you will receive the newline as \\r\\n. An empty string
-        is returned when EOF is hit immediately. Currently, the size argument is
-        mostly ignored, so this behavior is not standard for a file-like
-        object. If size is 0 then an empty string is returned. """
-
-        if size == 0:
-            return ''
-        index = self.expect (['\r\n', self.delimiter]) # delimiter default is EOF
-        if index == 0:
-            return self.before + '\r\n'
-        else:
-            return self.before
-
-    def __iter__ (self):    # File-like object.
-
-        """This is to support iterators over a file-like object.
-        """
-
-        return self
-
-    def next (self):    # File-like object.
-
-        """This is to support iterators over a file-like object.
-        """
-
-        result = self.readline()
-        if result == "":
-            raise StopIteration
-        return result
-
-    def readlines (self, sizehint = -1):    # File-like object.
-
-        """This reads until EOF using readline() and returns a list containing
-        the lines thus read. The optional "sizehint" argument is ignored. """
-
-        lines = []
-        while True:
-            line = self.readline()
-            if not line:
-                break
-            lines.append(line)
-        return lines
-
-    def write(self, s):   # File-like object.
-
-        """This is similar to send() except that there is no return value.
-        """
-
-        self.send (s)
-
-    def writelines (self, sequence):   # File-like object.
-
-        """This calls write() for each element in the sequence. The sequence
-        can be any iterable object producing strings, typically a list of
-        strings. This does not add line separators. There is no return value.
-        """
-
-        for s in sequence:
-            self.write (s)
-
-    def send(self, s):
-
-        """This sends a string to the child process. This returns the number of
-        bytes written. If a log file was set then the data is also written to
-        the log. """
-
-        time.sleep(self.delaybeforesend)
-        if self.logfile is not None:
-            self.logfile.write (s)
-            self.logfile.flush()
-        if self.logfile_send is not None:
-            self.logfile_send.write (s)
-            self.logfile_send.flush()
-        c = os.write(self.child_fd, s)
-        return c
-
-    def sendline(self, s=''):
-
-        """This is like send(), but it adds a line feed (os.linesep). This
-        returns the number of bytes written. """
-
-        n = self.send(s)
-        n = n + self.send (os.linesep)
-        return n
-
-    def sendcontrol(self, char):
-
-        """This sends a control character to the child such as Ctrl-C or
-        Ctrl-D. For example, to send a Ctrl-G (ASCII 7)::
-
-            child.sendcontrol('g')
-
-        See also, sendintr() and sendeof().
-        """
-
-        char = char.lower()
-        a = ord(char)
-        if a>=97 and a<=122:
-            a = a - ord('a') + 1
-            return self.send (chr(a))
-        d = {'@':0, '`':0,
-            '[':27, '{':27,
-            '\\':28, '|':28,
-            ']':29, '}': 29,
-            '^':30, '~':30,
-            '_':31,
-            '?':127}
-        if char not in d:
-            return 0
-        return self.send (chr(d[char]))
-
-    def sendeof(self):
-
-        """This sends an EOF to the child. This sends a character which causes
-        the pending parent output buffer to be sent to the waiting child
-        program without waiting for end-of-line. If it is the first character
-        of the line, the read() in the user program returns 0, which signifies
-        end-of-file. This means to work as expected a sendeof() has to be
-        called at the beginning of a line. This method does not send a newline.
-        It is the responsibility of the caller to ensure the eof is sent at the
-        beginning of a line. """
-
-        ### Hmmm... how do I send an EOF?
-        ###C  if ((m = write(pty, *buf, p - *buf)) < 0)
-        ###C      return (errno == EWOULDBLOCK) ? n : -1;
-        #fd = sys.stdin.fileno()
-        #old = termios.tcgetattr(fd) # remember current state
-        #attr = termios.tcgetattr(fd)
-        #attr[3] = attr[3] | termios.ICANON # ICANON must be set to recognize EOF
-        #try: # use try/finally to ensure state gets restored
-        #    termios.tcsetattr(fd, termios.TCSADRAIN, attr)
-        #    if hasattr(termios, 'CEOF'):
-        #        os.write (self.child_fd, '%c' % termios.CEOF)
-        #    else:
-        #        # Silly platform does not define CEOF so assume CTRL-D
-        #        os.write (self.child_fd, '%c' % 4)
-        #finally: # restore state
-        #    termios.tcsetattr(fd, termios.TCSADRAIN, old)
-        if hasattr(termios, 'VEOF'):
-            char = termios.tcgetattr(self.child_fd)[6][termios.VEOF]
-        else:
-            # platform does not define VEOF so assume CTRL-D
-            char = chr(4)
-        self.send(char)
-
-    def sendintr(self):
-
-        """This sends a SIGINT to the child. It does not require
-        the SIGINT to be the first character on a line. """
-
-        if hasattr(termios, 'VINTR'):
-            char = termios.tcgetattr(self.child_fd)[6][termios.VINTR]
-        else:
-            # platform does not define VINTR so assume CTRL-C
-            char = chr(3)
-        self.send (char)
-
-    def eof (self):
-
-        """This returns True if the EOF exception was ever raised.
-        """
-
-        return self.flag_eof
-
-    def terminate(self, force=False):
-
-        """This forces a child process to terminate. It starts nicely with
-        SIGHUP and SIGINT. If "force" is True then moves onto SIGKILL. This
-        returns True if the child was terminated. This returns False if the
-        child could not be terminated. """
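-        # Illustrative usage (the names below are assumed, not from the
-        # original docstring):
-        #
-        #     child = pexpect.spawn('some_command')
-        #     if not child.terminate(force=True):
-        #         print 'The child process could not be terminated.'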
-
-        if not self.isalive():
-            return True
-        try:
-            self.kill(signal.SIGHUP)
-            time.sleep(self.delayafterterminate)
-            if not self.isalive():
-                return True
-            self.kill(signal.SIGCONT)
-            time.sleep(self.delayafterterminate)
-            if not self.isalive():
-                return True
-            self.kill(signal.SIGINT)
-            time.sleep(self.delayafterterminate)
-            if not self.isalive():
-                return True
-            if force:
-                self.kill(signal.SIGKILL)
-                time.sleep(self.delayafterterminate)
-                if not self.isalive():
-                    return True
-                else:
-                    return False
-            return False
-        except OSError, e:
-            # I think there are kernel timing issues that sometimes cause
-            # this to happen. I think isalive() reports True, but the
-            # process is dead to the kernel.
-            # Make one last attempt to see if the kernel is up to date.
-            time.sleep(self.delayafterterminate)
-            if not self.isalive():
-                return True
-            else:
-                return False
-
-    def wait(self):
-
-        """This waits until the child exits. This is a blocking call. This will
-        not read any data from the child, so this will block forever if the
-        child has unread output and has terminated. In other words, the child
-        may have printed output then called exit(); but, technically, the child
-        is still alive until its output is read. """
-
-        if self.isalive():
-            pid, status = os.waitpid(self.pid, 0)
-        else:
-            raise ExceptionPexpect ('Cannot wait for dead child process.')
-        self.exitstatus = os.WEXITSTATUS(status)
-        if os.WIFEXITED (status):
-            self.status = status
-            self.exitstatus = os.WEXITSTATUS(status)
-            self.signalstatus = None
-            self.terminated = True
-        elif os.WIFSIGNALED (status):
-            self.status = status
-            self.exitstatus = None
-            self.signalstatus = os.WTERMSIG(status)
-            self.terminated = True
-        elif os.WIFSTOPPED (status):
-            raise ExceptionPexpect ('Wait was called for a child process that is stopped. This is not supported. Is some other process attempting job control with our child pid?')
-        return self.exitstatus
-
-    def isalive(self):
-
-        """This tests if the child process is running or not. This is
-        non-blocking. If the child was terminated then this will read the
-        exitstatus or signalstatus of the child. This returns True if the child
-        process appears to be running or False if not. It can take literally
-        SECONDS for Solaris to return the right status. """
-
-        if self.terminated:
-            return False
-
-        if self.flag_eof:
-            # This is for Linux, which requires the blocking form of waitpid to get
-            # status of a defunct process. This is super-lame. The flag_eof would have
-            # been set in read_nonblocking(), so this should be safe.
-            waitpid_options = 0
-        else:
-            waitpid_options = os.WNOHANG
-
-        try:
-            pid, status = os.waitpid(self.pid, waitpid_options)
-        except OSError, e: # No child processes
-            if e[0] == errno.ECHILD:
-                raise ExceptionPexpect ('isalive() encountered condition where "terminated" is 0, but there was no child process. Did someone else call waitpid() on our process?')
-            else:
-                raise e
-
-        # I have to do this twice for Solaris. I can't even believe that I figured this out...
-        # If waitpid() returns 0 it means that no child process wishes to
-        # report, and the value of status is undefined.
-        if pid == 0:
-            try:
-                pid, status = os.waitpid(self.pid, waitpid_options) ### os.WNOHANG) # Solaris!
-            except OSError, e: # This should never happen...
-                if e[0] == errno.ECHILD:
-                    raise ExceptionPexpect ('isalive() encountered condition that should never happen. There was no child process. Did someone else call waitpid() on our process?')
-                else:
-                    raise e
-
-            # If pid is still 0 after two calls to waitpid() then
-            # the process really is alive. This seems to work on all platforms, except
-            # for Irix which seems to require a blocking call on waitpid or select, so I let read_nonblocking
-            # take care of this situation (unfortunately, this requires waiting through the timeout).
-            if pid == 0:
-                return True
-
-        if pid == 0:
-            return True
-
-        if os.WIFEXITED (status):
-            self.status = status
-            self.exitstatus = os.WEXITSTATUS(status)
-            self.signalstatus = None
-            self.terminated = True
-        elif os.WIFSIGNALED (status):
-            self.status = status
-            self.exitstatus = None
-            self.signalstatus = os.WTERMSIG(status)
-            self.terminated = True
-        elif os.WIFSTOPPED (status):
-            raise ExceptionPexpect ('isalive() encountered condition where child process is stopped. This is not supported. Is some other process attempting job control with our child pid?')
-        return False
-
-    def kill(self, sig):
-
-        """This sends the given signal to the child application. In keeping
-        with UNIX tradition it has a misleading name. It does not necessarily
-        kill the child unless you send the right signal. """
-
-        # Same as os.kill, but the pid is given for you.
-        if self.isalive():
-            os.kill(self.pid, sig)
-
-    def compile_pattern_list(self, patterns):
-
-        """This compiles a pattern-string or a list of pattern-strings.
-        Patterns must be a StringType, EOF, TIMEOUT, SRE_Pattern, or a list of
-        those. Patterns may also be None which results in an empty list (you
-        might do this if waiting for an EOF or TIMEOUT condition without
-        expecting any pattern).
-
-        This is used by expect() when calling expect_list(). Thus expect() is
-        nothing more than::
-
-             cpl = self.compile_pattern_list(pl)
-             return self.expect_list(cpl, timeout)
-
-        If you are using expect() within a loop it may be more
-        efficient to compile the patterns first and then call expect_list().
-        This avoids calls in a loop to compile_pattern_list()::
-
-             cpl = self.compile_pattern_list(my_pattern)
-             while some_condition:
-                ...
-                i = self.expect_list(cpl, timeout)
-                ...
-        """
-
-        if patterns is None:
-            return []
-        if type(patterns) is not types.ListType:
-            patterns = [patterns]
-
-        compile_flags = re.DOTALL # Allow dot to match \n
-        if self.ignorecase:
-            compile_flags = compile_flags | re.IGNORECASE
-        compiled_pattern_list = []
-        for p in patterns:
-            if type(p) in types.StringTypes:
-                compiled_pattern_list.append(re.compile(p, compile_flags))
-            elif p is EOF:
-                compiled_pattern_list.append(EOF)
-            elif p is TIMEOUT:
-                compiled_pattern_list.append(TIMEOUT)
-            elif type(p) is type(re.compile('')):
-                compiled_pattern_list.append(p)
-            else:
-                raise TypeError ('Argument must be one of StringTypes, EOF, TIMEOUT, SRE_Pattern, or a list of those type. %s' % str(type(p)))
-
-        return compiled_pattern_list
-
-    def expect(self, pattern, timeout = -1, searchwindowsize=None):
-
-        """This seeks through the stream until a pattern is matched. The
-        pattern is overloaded and may take several types. The pattern can be a
-        StringType, EOF, a compiled re, or a list of any of those types.
-        Strings will be compiled to re types. This returns the index into the
-        pattern list. If the pattern was not a list this returns index 0 on a
-        successful match. This may raise exceptions for EOF or TIMEOUT. To
-        avoid the EOF or TIMEOUT exceptions add EOF or TIMEOUT to the pattern
-        list. That will cause expect to match an EOF or TIMEOUT condition
-        instead of raising an exception.
-
-        If you pass a list of patterns and more than one matches, the first match
-        in the stream is chosen. If more than one pattern matches at that point,
-        the leftmost in the pattern list is chosen. For example::
-
-            # the input is 'foobar'
-            index = p.expect (['bar', 'foo', 'foobar'])
-            # returns 1 ('foo') even though 'foobar' is a "better" match
-
-        Please note, however, that buffering can affect this behavior, since
-        input arrives in unpredictable chunks. For example::
-
-            # the input is 'foobar'
-            index = p.expect (['foobar', 'foo'])
-            # returns 0 ('foobar') if all input is available at once,
-            # but returns 1 ('foo') if parts of the final 'bar' arrive late
-
-        After a match is found the instance attributes 'before', 'after' and
-        'match' will be set. You can see all the data read before the match in
-        'before'. You can see the data that was matched in 'after'. The
-        re.MatchObject used in the re match will be in 'match'. If an error
-        occurred then 'before' will be set to all the data read so far and
-        'after' and 'match' will be None.
-
-        If timeout is -1 then timeout will be set to the self.timeout value.
-
-        A list entry may be EOF or TIMEOUT instead of a string. This will
-        catch these exceptions and return the index of the list entry instead
-        of raising the exception. The attribute 'after' will be set to the
-        exception type. The attribute 'match' will be None. This allows you to
-        write code like this::
-
-                index = p.expect (['good', 'bad', pexpect.EOF, pexpect.TIMEOUT])
-                if index == 0:
-                    do_something()
-                elif index == 1:
-                    do_something_else()
-                elif index == 2:
-                    do_some_other_thing()
-                elif index == 3:
-                    do_something_completely_different()
-
-        instead of code like this::
-
-                try:
-                    index = p.expect (['good', 'bad'])
-                    if index == 0:
-                        do_something()
-                    elif index == 1:
-                        do_something_else()
-                except EOF:
-                    do_some_other_thing()
-                except TIMEOUT:
-                    do_something_completely_different()
-
-        These two forms are equivalent. It all depends on what you want. You
-        can also just expect the EOF if you are waiting for all output of a
-        child to finish. For example::
-
-                p = pexpect.spawn('/bin/ls')
-                p.expect (pexpect.EOF)
-                print p.before
-
-        If you are trying to optimize for speed then see expect_list().
-        """
-
-        compiled_pattern_list = self.compile_pattern_list(pattern)
-        return self.expect_list(compiled_pattern_list, timeout, searchwindowsize)
-
-    def expect_list(self, pattern_list, timeout = -1, searchwindowsize = -1):
-
-        """This takes a list of compiled regular expressions and returns the
-        index into the pattern_list that matched the child output. The list may
-        also contain EOF or TIMEOUT (which are not compiled regular
-        expressions). This method is similar to the expect() method except that
-        expect_list() does not recompile the pattern list on every call. This
-        may help if you are trying to optimize for speed, otherwise just use
-        the expect() method.  This is called by expect(). If timeout==-1 then
-        the self.timeout value is used. If searchwindowsize==-1 then the
-        self.searchwindowsize value is used. """
-
-        return self.expect_loop(searcher_re(pattern_list), timeout, searchwindowsize)
-
-    def expect_exact(self, pattern_list, timeout = -1, searchwindowsize = -1):
-
-        """This is similar to expect(), but uses plain string matching instead
-        of compiled regular expressions in 'pattern_list'. The 'pattern_list'
-        may be a string; a list or other sequence of strings; or TIMEOUT and
-        EOF.
-
-        This call might be faster than expect() for two reasons: string
-        searching is faster than RE matching and it is possible to limit the
-        search to just the end of the input buffer.
-
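-        For example, a minimal illustrative sketch matching a literal prompt
-        string (the command and prompt shown are only examples)::
-
-            child = pexpect.spawn('/bin/sh')
-            child.expect_exact(['$ ', pexpect.EOF])
-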
-        This method is also useful when you don't want to have to worry about
-        escaping regular expression characters that you want to match."""
-
-        if type(pattern_list) in types.StringTypes or pattern_list in (TIMEOUT, EOF):
-            pattern_list = [pattern_list]
-        return self.expect_loop(searcher_string(pattern_list), timeout, searchwindowsize)
-
-    def expect_loop(self, searcher, timeout = -1, searchwindowsize = -1):
-
-        """This is the common loop used inside expect. The 'searcher' should be
-        an instance of searcher_re or searcher_string, which describes how and what
-        to search for in the input.
-
-        See expect() for other arguments, return value and exceptions. """
-
-        self.searcher = searcher
-
-        if timeout == -1:
-            timeout = self.timeout
-        if timeout is not None:
-            end_time = time.time() + timeout 
-        if searchwindowsize == -1:
-            searchwindowsize = self.searchwindowsize
-
-        try:
-            incoming = self.buffer
-            freshlen = len(incoming)
-            while True: # Keep reading until exception or return.
-                index = searcher.search(incoming, freshlen, searchwindowsize)
-                if index >= 0:
-                    self.buffer = incoming[searcher.end : ]
-                    self.before = incoming[ : searcher.start]
-                    self.after = incoming[searcher.start : searcher.end]
-                    self.match = searcher.match
-                    self.match_index = index
-                    return self.match_index
-                # No match at this point
-                if timeout < 0 and timeout is not None:
-                    raise TIMEOUT ('Timeout exceeded in expect_any().')
-                # Still have time left, so read more data
-                c = self.read_nonblocking (self.maxread, timeout)
-                freshlen = len(c)
-                time.sleep (0.0001)
-                incoming = incoming + c
-                if timeout is not None:
-                    timeout = end_time - time.time()
-        except EOF, e:
-            self.buffer = ''
-            self.before = incoming
-            self.after = EOF
-            index = searcher.eof_index
-            if index >= 0:
-                self.match = EOF
-                self.match_index = index
-                return self.match_index
-            else:
-                self.match = None
-                self.match_index = None
-                raise EOF (str(e) + '\n' + str(self))
-        except TIMEOUT, e:
-            self.buffer = incoming
-            self.before = incoming
-            self.after = TIMEOUT
-            index = searcher.timeout_index
-            if index >= 0:
-                self.match = TIMEOUT
-                self.match_index = index
-                return self.match_index
-            else:
-                self.match = None
-                self.match_index = None
-                raise TIMEOUT (str(e) + '\n' + str(self))
-        except:
-            self.before = incoming
-            self.after = None
-            self.match = None
-            self.match_index = None
-            raise
-
-    def getwinsize(self):
-
-        """This returns the terminal window size of the child tty. The return
-        value is a tuple of (rows, cols). """
-
-        TIOCGWINSZ = getattr(termios, 'TIOCGWINSZ', 1074295912L)
-        s = struct.pack('HHHH', 0, 0, 0, 0)
-        x = fcntl.ioctl(self.fileno(), TIOCGWINSZ, s)
-        return struct.unpack('HHHH', x)[0:2]
-
-    def setwinsize(self, r, c):
-
-        """This sets the terminal window size of the child tty. This will cause
-        a SIGWINCH signal to be sent to the child. This does not change the
-        physical window size. It changes the size reported to TTY-aware
-        applications like vi or curses -- applications that respond to the
-        SIGWINCH signal. """
-
-        # Check for buggy platforms. Some Python versions on some platforms
-        # (notably OSF1 Alpha and RedHat 7.1) truncate the value for
-        # termios.TIOCSWINSZ. It is not clear why this happens.
-        # These platforms don't seem to handle the signed int very well;
-        # yet other platforms like OpenBSD have a large negative value for
-        # TIOCSWINSZ and they don't have a truncate problem.
-        # Newer versions of Linux have totally different values for TIOCSWINSZ.
-        # Note that this fix is a hack.
-        TIOCSWINSZ = getattr(termios, 'TIOCSWINSZ', -2146929561)
-        if TIOCSWINSZ == 2148037735L: # L is not required in Python >= 2.2.
-            TIOCSWINSZ = -2146929561 # Same bits, but with sign.
-        # Note, assume ws_xpixel and ws_ypixel are zero.
-        s = struct.pack('HHHH', r, c, 0, 0)
-        fcntl.ioctl(self.fileno(), TIOCSWINSZ, s)
-
-    def interact(self, escape_character = chr(29), input_filter = None, output_filter = None):
-
-        """This gives control of the child process to the interactive user (the
-        human at the keyboard). Keystrokes are sent to the child process, and
-        the stdout and stderr output of the child process is printed. This
-        simply echoes the child stdout and child stderr to the real stdout and
-        it echoes the real stdin to the child stdin. When the user types the
-        escape_character this method will stop. The default for
-        escape_character is ^]. This should not be confused with ASCII 27 --
-        the ESC character. ASCII 29 was chosen for historical merit because
-        this is the character used by 'telnet' as the escape character. The
-        escape_character will not be sent to the child process.
-
-        You may pass in optional input and output filter functions. These
-        functions should take a string and return a string. The output_filter
-        will be passed all the output from the child process. The input_filter
-        will be passed all the keyboard input from the user. The input_filter
-        is run BEFORE the check for the escape_character.
-
-        Note that if you change the window size of the parent the SIGWINCH
-        signal will not be passed through to the child. If you want the child
-        window size to change when the parent's window size changes then do
-        something like the following example::
-
-            import pexpect, struct, fcntl, termios, signal, sys
-            def sigwinch_passthrough (sig, data):
-                s = struct.pack("HHHH", 0, 0, 0, 0)
-                a = struct.unpack('hhhh', fcntl.ioctl(sys.stdout.fileno(), termios.TIOCGWINSZ , s))
-                global p
-                p.setwinsize(a[0],a[1])
-            p = pexpect.spawn('/bin/bash') # Note this is global and used in sigwinch_passthrough.
-            signal.signal(signal.SIGWINCH, sigwinch_passthrough)
-            p.interact()
-        """
-
-        # Flush the buffer.
-        self.stdout.write (self.buffer)
-        self.stdout.flush()
-        self.buffer = ''
-        mode = tty.tcgetattr(self.STDIN_FILENO)
-        tty.setraw(self.STDIN_FILENO)
-        try:
-            self.__interact_copy(escape_character, input_filter, output_filter)
-        finally:
-            tty.tcsetattr(self.STDIN_FILENO, tty.TCSAFLUSH, mode)
-
-    def __interact_writen(self, fd, data):
-
-        """This is used by the interact() method.
-        """
-
-        while data != '' and self.isalive():
-            n = os.write(fd, data)
-            data = data[n:]
-
-    def __interact_read(self, fd):
-
-        """This is used by the interact() method.
-        """
-
-        return os.read(fd, 1000)
-
-    def __interact_copy(self, escape_character = None, input_filter = None, output_filter = None):
-
-        """This is used by the interact() method.
-        """
-
-        while self.isalive():
-            r,w,e = self.__select([self.child_fd, self.STDIN_FILENO], [], [])
-            if self.child_fd in r:
-                data = self.__interact_read(self.child_fd)
-                if output_filter: data = output_filter(data)
-                if self.logfile is not None:
-                    self.logfile.write (data)
-                    self.logfile.flush()
-                os.write(self.STDOUT_FILENO, data)
-            if self.STDIN_FILENO in r:
-                data = self.__interact_read(self.STDIN_FILENO)
-                if input_filter: data = input_filter(data)
-                i = data.rfind(escape_character)
-                if i != -1:
-                    data = data[:i]
-                    self.__interact_writen(self.child_fd, data)
-                    break
-                self.__interact_writen(self.child_fd, data)
-
-    def __select (self, iwtd, owtd, ewtd, timeout=None):
-
-        """This is a wrapper around select.select() that ignores signals. If
-        select.select raises a select.error exception and errno is an EINTR
-        error then it is ignored. Mainly this is used to ignore sigwinch
-        (terminal resize). """
-
-        # if select() is interrupted by a signal (errno==EINTR) then
-        # we loop back and enter the select() again.
-        if timeout is not None:
-            end_time = time.time() + timeout
-        while True:
-            try:
-                return select.select (iwtd, owtd, ewtd, timeout)
-            except select.error, e:
-                if e[0] == errno.EINTR:
-                    # if we loop back we have to subtract the amount of time we already waited.
-                    if timeout is not None:
-                        timeout = end_time - time.time()
-                        if timeout < 0:
-                            return ([],[],[])
-                else: # something else caused the select.error, so this really is an exception
-                    raise
-
-##############################################################################
-# The following methods are no longer supported or allowed.
-
-    def setmaxread (self, maxread):
-
-        """This method is no longer supported or allowed. I don't like getters
-        and setters without a good reason. """
-
-        raise ExceptionPexpect ('This method is no longer supported or allowed. Just assign a value to the maxread member variable.')
-
-    def setlog (self, fileobject):
-
-        """This method is no longer supported or allowed.
-        """
-
-        raise ExceptionPexpect ('This method is no longer supported or allowed. Just assign a value to the logfile member variable.')
-
-##############################################################################
-# End of spawn class
-##############################################################################
-
-class searcher_string (object):
-
-    """This is a plain string search helper for the spawn.expect_any() method.
-
-    Attributes:
-
-        eof_index     - index of EOF, or -1
-        timeout_index - index of TIMEOUT, or -1
-
-    After a successful match by the search() method the following attributes
-    are available:
-
-        start - index into the buffer, first byte of match
-        end   - index into the buffer, first byte after match
-        match - the matching string itself
-    """
-
-    def __init__(self, strings):
-
-        """This creates an instance of searcher_string. This argument 'strings'
-        may be a list; a sequence of strings; or the EOF or TIMEOUT types. """
-
-        self.eof_index = -1
-        self.timeout_index = -1
-        self._strings = []
-        for n, s in zip(range(len(strings)), strings):
-            if s is EOF:
-                self.eof_index = n
-                continue
-            if s is TIMEOUT:
-                self.timeout_index = n
-                continue
-            self._strings.append((n, s))
-
-    def __str__(self):
-
-        """This returns a human-readable string that represents the state of
-        the object."""
-
-        ss =  [ (ns[0],'    %d: "%s"' % ns) for ns in self._strings ]
-        ss.append((-1,'searcher_string:'))
-        if self.eof_index >= 0:
-            ss.append ((self.eof_index,'    %d: EOF' % self.eof_index))
-        if self.timeout_index >= 0:
-            ss.append ((self.timeout_index,'    %d: TIMEOUT' % self.timeout_index))
-        ss.sort()
-        ss = zip(*ss)[1]
-        return '\n'.join(ss)
-
-    def search(self, buffer, freshlen, searchwindowsize=None):
-
-        """This searches 'buffer' for the first occurence of one of the search
-        strings.  'freshlen' must indicate the number of bytes at the end of
-        'buffer' which have not been searched before. It helps to avoid
-        searching the same, possibly big, buffer over and over again.
-
-        See class spawn for the 'searchwindowsize' argument.
-
-        If there is a match this returns the index of that string, and sets
-        'start', 'end' and 'match'. Otherwise, this returns -1. """
-
-        absurd_match = len(buffer)
-        first_match = absurd_match
-
-        # 'freshlen' helps a lot here. Further optimizations could
-        # possibly include:
-        #
-        # using something like the Boyer-Moore Fast String Searching
-        # Algorithm; pre-compiling the search through a list of
-        # strings into something that can scan the input once to
-        # search for all N strings; realize that if we search for
-        # ['bar', 'baz'] and the input is '...foo' we need not bother
-        # rescanning until we've read three more bytes.
-        #
-        # Sadly, I don't know enough about this interesting topic. /grahn
-        
-        for index, s in self._strings:
-            if searchwindowsize is None:
-                # the match, if any, can only be in the fresh data,
-                # or at the very end of the old data
-                offset = -(freshlen+len(s))
-            else:
-                # better obey searchwindowsize
-                offset = -searchwindowsize
-            n = buffer.find(s, offset)
-            if n >= 0 and n < first_match:
-                first_match = n
-                best_index, best_match = index, s
-        if first_match == absurd_match:
-            return -1
-        self.match = best_match
-        self.start = first_match
-        self.end = self.start + len(self.match)
-        return best_index
-
-class searcher_re (object):
-
-    """This is regular expression string search helper for the
-    spawn.expect_any() method.
-
-    Attributes:
-
-        eof_index     - index of EOF, or -1
-        timeout_index - index of TIMEOUT, or -1
-
-    After a successful match by the search() method the following attributes
-    are available:
-
-        start - index into the buffer, first byte of match
-        end   - index into the buffer, first byte after match
-        match - the re.match object returned by a succesful re.search
-
-    """
-
-    def __init__(self, patterns):
-
-        """This creates an instance that searches for 'patterns' Where
-        'patterns' may be a list or other sequence of compiled regular
-        expressions, or the EOF or TIMEOUT types."""
-
-        self.eof_index = -1
-        self.timeout_index = -1
-        self._searches = []
-        for n, s in zip(range(len(patterns)), patterns):
-            if s is EOF:
-                self.eof_index = n
-                continue
-            if s is TIMEOUT:
-                self.timeout_index = n
-                continue
-            self._searches.append((n, s))
-
-    def __str__(self):
-
-        """This returns a human-readable string that represents the state of
-        the object."""
-
-        ss =  [ (n,'    %d: re.compile("%s")' % (n,str(s.pattern))) for n,s in self._searches]
-        ss.append((-1,'searcher_re:'))
-        if self.eof_index >= 0:
-            ss.append ((self.eof_index,'    %d: EOF' % self.eof_index))
-        if self.timeout_index >= 0:
-            ss.append ((self.timeout_index,'    %d: TIMEOUT' % self.timeout_index))
-        ss.sort()
-        ss = zip(*ss)[1]
-        return '\n'.join(ss)
-
-    def search(self, buffer, freshlen, searchwindowsize=None):
-
-        """This searches 'buffer' for the first occurence of one of the regular
-        expressions. 'freshlen' must indicate the number of bytes at the end of
-        'buffer' which have not been searched before.
-
-        See class spawn for the 'searchwindowsize' argument.
-        
-        If there is a match this returns the index of that string, and sets
-        'start', 'end' and 'match'. Otherwise, returns -1."""
-
-        absurd_match = len(buffer)
-        first_match = absurd_match
-        # 'freshlen' doesn't help here -- we cannot predict the
-        # length of a match, and the re module provides no help.
-        if searchwindowsize is None:
-            searchstart = 0
-        else:
-            searchstart = max(0, len(buffer)-searchwindowsize)
-        for index, s in self._searches:
-            match = s.search(buffer, searchstart)
-            if match is None:
-                continue
-            n = match.start()
-            if n < first_match:
-                first_match = n
-                the_match = match
-                best_index = index
-        if first_match == absurd_match:
-            return -1
-        self.start = first_match
-        self.match = the_match
-        self.end = self.match.end()
-        return best_index
-
-def which (filename):
-
-    """This takes a given filename; tries to find it in the environment path;
-    then checks if it is executable. This returns the full path to the filename
-    if found and executable. Otherwise this returns None."""
-
-    # Special case where filename already contains a path.
-    if os.path.dirname(filename) != '':
-        if os.access (filename, os.X_OK):
-            return filename
-
-    if not os.environ.has_key('PATH') or os.environ['PATH'] == '':
-        p = os.defpath
-    else:
-        p = os.environ['PATH']
-
-    # Oddly enough this was the one line that made Pexpect
-    # incompatible with Python 1.5.2.
-    #pathlist = p.split (os.pathsep)
-    pathlist = string.split (p, os.pathsep)
-
-    for path in pathlist:
-        f = os.path.join(path, filename)
-        if os.access(f, os.X_OK):
-            return f
-    return None
-
-def split_command_line(command_line):
-
-    """This splits a command line into a list of arguments. It splits arguments
-    on spaces, but handles embedded quotes, doublequotes, and escaped
-    characters. It's impossible to do this with a regular expression, so I
-    wrote a little state machine to parse the command line. """
-
-    arg_list = []
-    arg = ''
-
-    # Constants to name the states we can be in.
-    state_basic = 0
-    state_esc = 1
-    state_singlequote = 2
-    state_doublequote = 3
-    state_whitespace = 4 # The state of consuming whitespace between commands.
-    state = state_basic
-
-    for c in command_line:
-        if state == state_basic or state == state_whitespace:
-            if c == '\\': # Escape the next character
-                state = state_esc
-            elif c == r"'": # Handle single quote
-                state = state_singlequote
-            elif c == r'"': # Handle double quote
-                state = state_doublequote
-            elif c.isspace():
-                # Add arg to arg_list if we aren't in the middle of whitespace.
-                if state == state_whitespace:
-                    None # Do nothing.
-                else:
-                    arg_list.append(arg)
-                    arg = ''
-                    state = state_whitespace
-            else:
-                arg = arg + c
-                state = state_basic
-        elif state == state_esc:
-            arg = arg + c
-            state = state_basic
-        elif state == state_singlequote:
-            if c == r"'":
-                state = state_basic
-            else:
-                arg = arg + c
-        elif state == state_doublequote:
-            if c == r'"':
-                state = state_basic
-            else:
-                arg = arg + c
-
-    if arg != '':
-        arg_list.append(arg)
-    return arg_list
-
-# vi:ts=4:sw=4:expandtab:ft=python:
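For reference, the split_command_line helper removed above is a small hand-rolled state machine; on simple inputs its behavior is roughly what the standard library's shlex.split provides. A short non-yt illustration (not part of this changeset):

    import shlex

    # Quoted arguments stay together and backslash escapes are honored.
    print shlex.split(r'grep -e "a b" c\ d')
    # -> ['grep', '-e', 'a b', 'c d']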


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/visualization/_MPL.c
--- a/yt/visualization/_MPL.c
+++ b/yt/visualization/_MPL.c
@@ -139,6 +139,7 @@
   xiter[0] = yiter[0] = 0;
   xiterv[0] = yiterv[0] = 0.0;
 
+  Py_BEGIN_ALLOW_THREADS
   for(i=0;i<rows;i++)for(j=0;j<cols;j++)
       *(npy_float64*) PyArray_GETPTR2(my_array, i, j) = 0.0;
   for(p=0;p<nx;p++)
@@ -187,6 +188,7 @@
       }
     }
   }
+  Py_END_ALLOW_THREADS
 
   // Attatch output buffer to output buffer
 


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/visualization/api.py
--- a/yt/visualization/api.py
+++ b/yt/visualization/api.py
@@ -49,7 +49,8 @@
     splat_points, \
     annotate_image, \
     apply_colormap, \
-    scale_image
+    scale_image, \
+    write_projection
 
 from plot_modifications import \
     PlotCallback, \


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -327,3 +327,87 @@
     im = image.copy()
     au.add_points_to_image(im, points_x, points_y, val)
     return im
+
+def write_projection(data, filename, colorbar=True, colorbar_label=None, 
+                    title=None, limits=None, take_log=True, var_fig_size=False):
+    r"""Write a projection or volume rendering to disk with a variety of 
+    pretty parameters such as limits, title, colorbar, etc.  write_projection
+    uses the standard matplotlib interface to create the figure.  N.B. This code
+    only works *after* you have created the projection using the standard 
+    framework (i.e. the Camera interface or off_axis_projection).
+
+    Accepts an NxM sized array representing the projection itself as well
+    as the filename to which you will save this figure.  
+
+    Parameters
+    ----------
+    data : array_like 
+        image array as output by off_axis_projection or camera.snapshot()
+    filename : string 
+        the filename where the data will be saved
+    colorbar : boolean
+        do you want a colorbar generated to the right of the image?
+    colorbar_label : string
+        the label associated with your colorbar
+    title : string
+        the label at the top of the figure
+    limits : 2-element array_like
+        the lower limit and the upper limit to be plotted in the figure 
+        of the data array
+    take_log : boolean
+        plot the log of the data array (and take the log of the limits if set)?
+    var_fig_size : boolean
+        If we want the resolution (and size) of the output image to scale 
+        with the resolution of the image array.  
+
+    Examples
+    --------
+
+    >>> image = off_axis_projection(pf, c, L, W, N, "Density", no_ghost=False)
+    >>> write_projection(image, 'test.png', 
+                         colorbar_label="Column Density (cm$^{-2}$)", 
+                         title="Offaxis Projection", limits=(1e-3,1e-5), 
+                         take_log=True)
+    """
+    import pylab as pl
+
+    # If this is rendered as log, then apply now.
+    if take_log:
+        data = na.log10(data)
+        if limits is not None:
+            limits = na.log10(limits)
+
+
+    # Create the figure and paint the data on
+    fig = pl.figure()
+    ax = fig.add_subplot(111)
+
+    if limits is not None:
+        cax = ax.imshow(data, vmin=limits[0], vmax=limits[1])
+    else:
+        cax = ax.imshow(data)
+
+    if title:
+        ax.set_title(title)
+
+    # Suppress the x and y pixel counts
+    ax.set_xticks(())
+    ax.set_yticks(())
+
+    # Add a color bar and label if requested
+    if colorbar:
+        cbar = fig.colorbar(cax)
+        if colorbar_label:
+            cbar.ax.set_ylabel(colorbar_label)
+
+    # If we want the resolution of the image to scale with the resolution
+    # of the image array. we increase the dpi value accordingly
+    if var_fig_size:
+        N = data.shape[0]
+        mag_factor = N/480.
+        pl.savefig(filename, dpi=100*mag_factor)
+    else:
+        pl.savefig(filename)
+
+    pl.clf()
+    pl.close()
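A quick note on the var_fig_size branch above: the save dpi is 100 * data.shape[0]/480., so the output resolution grows with the size of the input array. A hedged usage sketch, assuming write_projection is imported from yt.visualization.image_writer and a stand-in array in place of a real projection:

    import numpy as np
    from yt.visualization.image_writer import write_projection

    image = np.random.random((1920, 1920))   # stand-in for off_axis_projection output
    # Saved at dpi = 100 * (1920/480.) = 400, i.e. the figure resolution scales with the array.
    write_projection(image, 'proj.png', take_log=False, var_fig_size=True)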


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/visualization/plot_collection.py
--- a/yt/visualization/plot_collection.py
+++ b/yt/visualization/plot_collection.py
@@ -211,9 +211,9 @@
         Only ONE of the following options can be specified. If all 3 are
         specified, they will be used in the following precedence order:
 
-        * `ticks` - a list of floating point numbers at which to put ticks
-        * `minmaxtick` - display DEFAULT ticks with min & max also displayed
-        * `nticks` - if ticks not specified, can automatically determine a
+        * ``ticks`` - a list of floating point numbers at which to put ticks
+        * ``minmaxtick`` - display DEFAULT ticks with min & max also displayed
+        * ``nticks`` - if ticks not specified, can automatically determine a
           number of ticks to be evenly spaced in log space
         """
         for plot in self.plots:
@@ -1713,9 +1713,9 @@
     r"""Construct a multiple axes plot object, with or without a colorbar, into
     which multiple plots may be inserted.
 
-    This will create a set of `matplotlib.axes.Axes`, all lined up into a grid,
-    which are then returned to the user and which can be used to plot multiple
-    plots on a single figure.
+    This will create a set of :class:`matplotlib.axes.Axes`, all lined up into
+    a grid, which are then returned to the user and which can be used to plot
+    multiple plots on a single figure.
 
     Parameters
     ----------
@@ -1733,12 +1733,12 @@
 
     Returns
     -------
-    fig : `matplotlib.figure.Figure
+    fig : :class:`matplotlib.figure.Figure`
         The figure created inside which the axes reside
-    tr : list of list of `matplotlib.axes.Axes` objects
+    tr : list of list of :class:`matplotlib.axes.Axes` objects
         This is a list, where the inner list is along the x-axis and the outer
         is along the y-axis
-    cbars : list of `matplotlib.axes.Axes` objects
+    cbars : list of :class:`matplotlib.axes.Axes` objects
         Each of these is an axes onto which a colorbar can be placed.
 
     Notes


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/visualization/plot_types.py
--- a/yt/visualization/plot_types.py
+++ b/yt/visualization/plot_types.py
@@ -170,10 +170,11 @@
 
         Only ONE of the following options can be specified. If all 3 are
         specified, they will be used in the following precedence order:
-            ticks - a list of floating point numbers at which to put ticks
-            minmaxtick - display DEFAULT ticks with min & max also displayed
-            nticks - if ticks not specified, can automatically determine a
-               number of ticks to be evenly spaced in log space
+
+        * ``ticks`` - a list of floating point numbers at which to put ticks
+        * ``minmaxtick`` - display DEFAULT ticks with min & max also displayed
+        * ``nticks`` - if ticks not specified, can automatically determine a
+          number of ticks to be evenly spaced in log space
         """
         # This next call fixes some things, but is slower...
         self._redraw_image()


diff -r 88621ddd4ac89f698c4017a94c26ef69fc5015d8 -r b7610f1b5b45476641cd5ed3a2977513b16ac15e yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -826,6 +826,59 @@
                              oc.sub_samples, oc.pf)
         return (left_camera, right_camera)
 
+class FisheyeCamera(Camera):
+    def __init__(self, center, radius, fov, resolution,
+                 transfer_function = None, fields = None,
+                 sub_samples = 5, log_fields = None, volume = None,
+                 pf = None, no_ghost=False):
+        ParallelAnalysisInterface.__init__(self)
+        if pf is not None: self.pf = pf
+        self.center = na.array(center, dtype='float64')
+        self.radius = radius
+        self.fov = fov
+        if iterable(resolution):
+            raise RuntimeError("Resolution must be a single int")
+        self.resolution = resolution
+        if transfer_function is None:
+            transfer_function = ProjectionTransferFunction()
+        self.transfer_function = transfer_function
+        if fields is None: fields = ["Density"]
+        self.fields = fields
+        self.sub_samples = sub_samples
+        self.log_fields = log_fields
+        if volume is None:
+            volume = AMRKDTree(self.pf, fields=self.fields, no_ghost=no_ghost,
+                               log_fields=log_fields)
+        self.volume = volume
+
+    def snapshot(self):
+        image = na.zeros((self.resolution**2,1,3), dtype='float64', order='C')
+        # We now follow figures 4-7 of:
+        # http://paulbourke.net/miscellaneous/domefisheye/fisheye/
+        # ...but all in Cython.
+        vp = arr_fisheye_vectors(self.resolution, self.fov)
+        vp.shape = (self.resolution**2,1,3)
+        uv = na.ones(3, dtype='float64')
+        positions = na.ones((self.resolution**2, 1, 3), dtype='float64') * self.center
+        vector_plane = VectorPlane(positions, vp, self.center,
+                        (0.0, 1.0, 0.0, 1.0), image, uv, uv)
+        tfp = TransferFunctionProxy(self.transfer_function)
+        tfp.ns = self.sub_samples
+        self.volume.initialize_source()
+        mylog.info("Rendering fisheye of %s^2", self.resolution)
+        pbar = get_pbar("Ray casting",
+                        (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
+
+        total_cells = 0
+        for brick in self.volume.traverse(None, self.center, image):
+            brick.cast_plane(tfp, vector_plane)
+            total_cells += na.prod(brick.my_data[0].shape)
+            pbar.update(total_cells)
+        pbar.finish()
+        image.shape = (self.resolution, self.resolution, 3)
+        return image
+
+
 def off_axis_projection(pf, center, normal_vector, width, resolution,
                         field, weight = None):
     r"""Project through a parameter file, off-axis, and return the image plane.
@@ -884,7 +937,7 @@
             function=_make_wf(field, weight))
         fields = ["temp_weightfield", weight]
     image = na.zeros((resolution, resolution, 3), dtype='float64',
-                      order='C')
+                      order='F')
     normal_vector, north_vector, east_vector = ortho_find(normal_vector)
     unit_vectors = [north_vector, east_vector, normal_vector]
     back_center= center - 0.5*width * normal_vector
@@ -909,7 +962,6 @@
                 na.maximum(ma, this_point, ma)
     # Now we have a bounding box.
     grids = pf.h.region(center, mi, ma)._grids
-    print len(grids), len(pf.h.grids)
     pb = get_pbar("Sampling ", len(grids))
     for i,grid in enumerate(grids):
         data = [(grid[field] * grid.child_mask).astype("float64")
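A minimal sketch of driving the FisheyeCamera added in this camera.py diff, assuming a parameter file has already been loaded (the dataset name is a placeholder, and the default "Density" field is assumed to exist):

    from yt.mods import load
    from yt.visualization.volume_rendering.camera import FisheyeCamera

    pf = load("my_dataset")                      # hypothetical dataset
    center = 0.5 * (pf.domain_left_edge + pf.domain_right_edge)
    radius = 0.5 * (pf.domain_right_edge - pf.domain_left_edge).max()
    # 180-degree fisheye; resolution must be a single integer per the constructor.
    cam = FisheyeCamera(center, radius, fov=180.0, resolution=512, pf=pf)
    image = cam.snapshot()                       # (512, 512, 3) float64 array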



https://bitbucket.org/yt_analysis/yt/changeset/64a64eb7a7f9/
changeset:   64a64eb7a7f9
branch:      yt
user:        MatthewTurk
date:        2012-01-12 18:12:12
summary:     Merging in lighting from Sam
affected #:  6 files

diff -r 029e0d6bd0978a3c4cbcefb28cf4cee743bc8f59 -r 64a64eb7a7f992eb407872bc638e15a55f1d23de yt/utilities/_amr_utils/FixedInterpolator.c
--- a/yt/utilities/_amr_utils/FixedInterpolator.c
+++ b/yt/utilities/_amr_utils/FixedInterpolator.c
@@ -128,7 +128,7 @@
 }
 
 void eval_gradient(int ds[3], npy_float64 dp[3],
-				  npy_float64 *data, npy_float64 grad[3])
+				  npy_float64 *data, npy_float64 *grad)
 {
     // We just take some small value
 


diff -r 029e0d6bd0978a3c4cbcefb28cf4cee743bc8f59 -r 64a64eb7a7f992eb407872bc638e15a55f1d23de yt/utilities/_amr_utils/FixedInterpolator.h
--- a/yt/utilities/_amr_utils/FixedInterpolator.h
+++ b/yt/utilities/_amr_utils/FixedInterpolator.h
@@ -41,7 +41,7 @@
 npy_float64 trilinear_interpolate(int ds[3], int ci[3], npy_float64 dp[3],
 				  npy_float64 *data);
 
-void eval_gradient(int ds[3], npy_float64 dp[3], npy_float64 *data, npy_float64 grad[3]);
+void eval_gradient(int ds[3], npy_float64 dp[3], npy_float64 *data, npy_float64 *grad);
 
 void vertex_interp(npy_float64 v1, npy_float64 v2, npy_float64 isovalue,
                    npy_float64 vl[3], npy_float64 dds[3],


diff -r 029e0d6bd0978a3c4cbcefb28cf4cee743bc8f59 -r 64a64eb7a7f992eb407872bc638e15a55f1d23de yt/utilities/_amr_utils/field_interpolation_tables.pxd
--- a/yt/utilities/_amr_utils/field_interpolation_tables.pxd
+++ b/yt/utilities/_amr_utils/field_interpolation_tables.pxd
@@ -25,7 +25,7 @@
 
 cimport cython
 cimport numpy as np
-from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip
+from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip, fabs
 
 cdef struct FieldInterpolationTable:
     # Note that we make an assumption about retaining a reference to values
@@ -91,3 +91,30 @@
     for i in range(3):
         ta = fmax((1.0 - dt*trgba[i+3]), 0.0)
         rgba[i] = dt*trgba[i] + ta * rgba[i]
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef inline void FIT_eval_transfer_with_light(np.float64_t dt, np.float64_t *dvs, 
+        np.float64_t *grad, np.float64_t *l_dir, np.float64_t *l_rgba,
+        np.float64_t *rgba, int n_fits,
+        FieldInterpolationTable fits[6],
+        int field_table_ids[6]) nogil:
+    cdef int i, fid, use
+    cdef np.float64_t ta, tf, istorage[6], trgba[6], dot_prod
+    dot_prod = 0.0
+    for i in range(3):
+        dot_prod += l_dir[i]*grad[i]
+    #dot_prod = fmax(0.0, dot_prod)
+    for i in range(6): istorage[i] = 0.0
+    for i in range(n_fits):
+        istorage[i] = FIT_get_value(&fits[i], dvs)
+    for i in range(n_fits):
+        fid = fits[i].weight_table_id
+        if fid != -1: istorage[i] *= istorage[fid]
+    for i in range(6):
+        trgba[i] = istorage[field_table_ids[i]]
+    for i in range(3):
+        ta = fmax((1.0 - dt*trgba[i+3]), 0.0)
+        rgba[i] = dt*trgba[i] + ta * rgba[i] + dt*dot_prod*l_rgba[i]*trgba[i]*l_rgba[3] #(trgba[0]+trgba[1]+trgba[2])
+


diff -r 029e0d6bd0978a3c4cbcefb28cf4cee743bc8f59 -r 64a64eb7a7f992eb407872bc638e15a55f1d23de yt/utilities/_amr_utils/fp_utils.pxd
--- a/yt/utilities/_amr_utils/fp_utils.pxd
+++ b/yt/utilities/_amr_utils/fp_utils.pxd
@@ -42,6 +42,10 @@
     if f0 < f1: return f0
     return f1
 
+cdef inline np.float64_t fabs(np.float64_t f0) nogil:
+    if f0 < 0.0: return -f0
+    return f0
+
 cdef inline int iclip(int i, int a, int b) nogil:
     if i < a: return a
     if i > b: return b


diff -r 029e0d6bd0978a3c4cbcefb28cf4cee743bc8f59 -r 64a64eb7a7f992eb407872bc638e15a55f1d23de yt/utilities/_amr_utils/grid_traversal.pyx
--- a/yt/utilities/_amr_utils/grid_traversal.pyx
+++ b/yt/utilities/_amr_utils/grid_traversal.pyx
@@ -31,7 +31,8 @@
 from stdlib cimport malloc, free, abs
 from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip
 from field_interpolation_tables cimport \
-    FieldInterpolationTable, FIT_initialize_table, FIT_eval_transfer
+    FieldInterpolationTable, FIT_initialize_table, FIT_eval_transfer,\
+    FIT_eval_transfer_with_light
 from fixed_interpolator cimport *
 
 from cython.parallel import prange, parallel, threadid
@@ -308,7 +309,7 @@
     cdef ImageAccumulator *im = <ImageAccumulator *> data
     cdef int i
     cdef np.float64_t dl = (exit_t - enter_t)
-    cdef int di = (index[0]*vc.dims[1]+index[1])*vc.dims[2]+index[2]
+    cdef int di = (index[0]*vc.dims[1]+index[1])*vc.dims[2]+index[2] 
     for i in range(imin(3, vc.n_fields)):
         im.rgba[i] += vc.data[i][di] * dl
 
@@ -325,6 +326,8 @@
     np.float64_t star_er
     np.float64_t star_sigma_num
     kdtree_utils.kdtree *star_list
+    np.float64_t *light_dir
+    np.float64_t *light_rgba
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
@@ -369,6 +372,46 @@
         for i in range(vc.n_fields):
             dvs[i] += slopes[i]
 
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef void volume_render_gradient_sampler(
+                 VolumeContainer *vc, 
+                 np.float64_t v_pos[3],
+                 np.float64_t v_dir[3],
+                 np.float64_t enter_t,
+                 np.float64_t exit_t,
+                 int index[3],
+                 void *data) nogil:
+    cdef ImageAccumulator *im = <ImageAccumulator *> data
+    cdef VolumeRenderAccumulator *vri = <VolumeRenderAccumulator *> \
+            im.supp_data
+    # we assume this has vertex-centered data.
+    cdef int offset = index[0] * (vc.dims[1] + 1) * (vc.dims[2] + 1) \
+                    + index[1] * (vc.dims[2] + 1) + index[2]
+    cdef np.float64_t slopes[6], dp[3], ds[3]
+    cdef np.float64_t dt = (exit_t - enter_t) / vri.n_samples
+    cdef np.float64_t dvs[6]
+    cdef np.float64_t *grad
+    grad = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+    for i in range(3):
+        dp[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
+        dp[i] -= index[i] * vc.dds[i] + vc.left_edge[i]
+        dp[i] *= vc.idds[i]
+        ds[i] = v_dir[i] * vc.idds[i] * dt
+    for i in range(vri.n_samples):
+        for j in range(vc.n_fields):
+            dvs[j] = offset_interpolate(vc.dims, dp,
+                    vc.data[j] + offset)
+        eval_gradient(vc.dims, dp, vc.data[0] + offset, grad)
+        FIT_eval_transfer_with_light(dt, dvs, grad, 
+                vri.light_dir, vri.light_rgba,
+                im.rgba, vri.n_fits, 
+                vri.fits, vri.field_table_ids)
+        for j in range(3):
+            dp[j] += ds[j]
+    free(grad)
+
 cdef class star_kdtree_container:
     cdef kdtree_utils.kdtree *tree
     cdef public np.float64_t sigma
@@ -525,6 +568,68 @@
         free(self.vra.fits)
         free(self.vra)
 
+cdef class LightSourceRenderSampler(ImageSampler):
+    cdef VolumeRenderAccumulator *vra
+    cdef public object tf_obj
+    cdef public object my_field_tables
+    def __cinit__(self, 
+                  np.ndarray vp_pos,
+                  np.ndarray vp_dir,
+                  np.ndarray[np.float64_t, ndim=1] center,
+                  bounds,
+                  np.ndarray[np.float64_t, ndim=3] image,
+                  np.ndarray[np.float64_t, ndim=1] x_vec,
+                  np.ndarray[np.float64_t, ndim=1] y_vec,
+                  np.ndarray[np.float64_t, ndim=1] width,
+                  tf_obj, n_samples = 10,
+                  light_dir=[1.,1.,1.],
+                  light_rgba=[1.,1.,1.,1.]):
+        ImageSampler.__init__(self, vp_pos, vp_dir, center, bounds, image,
+                               x_vec, y_vec, width)
+        cdef int i
+        cdef np.ndarray[np.float64_t, ndim=1] temp
+        # Now we handle tf_obj
+        self.vra = <VolumeRenderAccumulator *> \
+            malloc(sizeof(VolumeRenderAccumulator))
+        self.vra.fits = <FieldInterpolationTable *> \
+            malloc(sizeof(FieldInterpolationTable) * 6)
+        self.vra.n_fits = tf_obj.n_field_tables
+        assert(self.vra.n_fits <= 6)
+        self.vra.n_samples = n_samples
+        self.vra.light_dir = <np.float64_t *> malloc(sizeof(np.float64_t) * 3)
+        self.vra.light_rgba = <np.float64_t *> malloc(sizeof(np.float64_t) * 4)
+        light_dir /= np.sqrt(light_dir[0]**2 + light_dir[1]**2 + light_dir[2]**2)
+        for i in range(3):
+            self.vra.light_dir[i] = light_dir[i]
+        for i in range(4):
+            self.vra.light_rgba[i] = light_rgba[i]
+        self.my_field_tables = []
+        for i in range(self.vra.n_fits):
+            temp = tf_obj.tables[i].y
+            FIT_initialize_table(&self.vra.fits[i],
+                      temp.shape[0],
+                      <np.float64_t *> temp.data,
+                      tf_obj.tables[i].x_bounds[0],
+                      tf_obj.tables[i].x_bounds[1],
+                      tf_obj.field_ids[i], tf_obj.weight_field_ids[i],
+                      tf_obj.weight_table_ids[i])
+            self.my_field_tables.append((tf_obj.tables[i],
+                                         tf_obj.tables[i].y))
+        for i in range(6):
+            self.vra.field_table_ids[i] = tf_obj.field_table_ids[i]
+        self.supp_data = <void *> self.vra
+
+    def setup(self, PartitionedGrid pg):
+        self.sampler = volume_render_gradient_sampler
+
+    def __dealloc__(self):
+        return
+        free(self.vra.fits)
+        free(self.vra)
+        free(self.light_dir)
+        free(self.light_rgba)
+
+
 cdef class GridFace:
     cdef int direction
     cdef public np.float64_t coord
@@ -685,16 +790,16 @@
            0.0 <= tl and tl < intersect_t:
             direction = i
             intersect_t = tl
-    if enter_t >= 0.0: intersect_t = enter_t
+    if enter_t >= 0.0: intersect_t = enter_t 
     if not ((0.0 <= intersect_t) and (intersect_t < 1.0)): return 0
     for i in range(3):
         # Two things have to be set inside this loop.
         # cur_ind[i], the current index of the grid cell the ray is in
         # tmax[i], the 't' until it crosses out of the grid cell
-        tdelta[i] = step[i] * iv_dir[i] * vc.dds[i]
+        tdelta[i] = step[i] * iv_dir[i] * vc.dds[i] 
         if i == direction and step[i] > 0:
             # Intersection with the left face in this direction
-            cur_ind[i] = 0
+            cur_ind[i] = 0 
         elif i == direction and step[i] < 0:
             # Intersection with the right face in this direction
             cur_ind[i] = vc.dims[i] - 1


diff -r 029e0d6bd0978a3c4cbcefb28cf4cee743bc8f59 -r 64a64eb7a7f992eb407872bc638e15a55f1d23de yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -43,6 +43,7 @@
 
 from yt.utilities.amr_utils import \
     PartitionedGrid, ProjectionSampler, VolumeRenderSampler, \
+    LightSourceRenderSampler, \
     arr_vec2pix_nest, arr_pix2vec_nest, arr_ang2pix_nest, \
     pixelize_healpix
 
@@ -55,7 +56,7 @@
                  sub_samples = 5, pf = None,
                  use_kd=True, l_max=None, no_ghost=True,
                  tree_type='domain',expand_factor=1.0,
-                 le=None, re=None):
+                 le=None, re=None, use_light=False):
         r"""A viewpoint into a volume, for volume rendering.
 
         The camera represents the eye of an observer, which will be used to
@@ -215,6 +216,9 @@
         self.use_kd = use_kd
         self.l_max = l_max
         self.no_ghost = no_ghost
+        self.use_light = use_light
+        self.light_dir = None
+        self.light_rgba = None
         if self.no_ghost:
             mylog.info('Warning: no_ghost is currently True (default). This may lead to artifacts at grid boundaries.')
         self.tree_type = tree_type
@@ -358,7 +362,19 @@
                 image, self.unit_vectors[0], self.unit_vectors[1],
                 na.array(self.width),
                 self.transfer_function, self.sub_samples)
-        sampler = VolumeRenderSampler(*args)
+        if self.use_light:
+            if self.light_dir is None:
+                self.set_default_light_dir()
+            temp_dir = na.empty(3,dtype='float64')
+            temp_dir = self.light_dir[0] * self.unit_vectors[1] + \
+                    self.light_dir[1] * self.unit_vectors[2] + \
+                    self.light_dir[2] * self.unit_vectors[0]
+            if self.light_rgba is None:
+                self.set_default_light_rgba()
+            sampler = LightSourceRenderSampler(*args, light_dir=temp_dir,
+                    light_rgba=self.light_rgba)
+        else:
+            sampler = VolumeRenderSampler(*args)
         self.volume.initialize_source()
 
         pbar = get_pbar("Ray casting",
@@ -379,6 +395,12 @@
 
         return image
 
+    def set_default_light_dir(self):
+        self.light_dir = [1.,1.,1.]
+
+    def set_default_light_rgba(self):
+        self.light_rgba = [1.,1.,1.,1.]
+
     def zoom(self, factor):
         r"""Change the distance to the focal point.
 
@@ -464,8 +486,8 @@
                     self.center += (na.array(final) - self.center) / (10. * n_steps)
                 final_zoom = final_width/na.array(self.width)
                 dW = final_zoom**(1.0/n_steps)
-	    else:
-		dW = 1.0
+            else:
+                dW = 1.0
             position_diff = (na.array(final)/self.center)*1.0
             dx = position_diff**(1.0/n_steps)
         else:
@@ -474,8 +496,8 @@
                     width = na.array([final_width, final_width, final_width]) 
                     # front/back, left/right, top/bottom
                 dW = (1.0*final_width-na.array(self.width))/n_steps
-	    else:
-		dW = 1.0
+            else:
+                dW = 1.0
             dx = (na.array(final)-self.center)*1.0/n_steps
         for i in xrange(n_steps):
             if exponential:
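With the use_light plumbing above, an existing Camera can opt into the new LightSourceRenderSampler without constructing it directly. A hedged sketch based only on the attributes added in this changeset, where cam is assumed to be an already-configured Camera:

    cam.use_light = True
    cam.light_dir = [1.0, 0.0, 0.0]          # direction, mixed with the camera's unit vectors
    cam.light_rgba = [1.0, 1.0, 1.0, 0.05]   # light color; the last entry acts as an overall strength
    image = cam.snapshot()                   # snapshot() now selects the lit sampler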



https://bitbucket.org/yt_analysis/yt/changeset/e6686117fdf9/
changeset:   e6686117fdf9
branch:      yt
user:        MatthewTurk
date:        2012-01-12 19:32:17
summary:     Moving the nogil block outside and changing scheduling for HEALpix openmp.
Long-term we should give HEALpix its own integrator.
affected #:  1 file

diff -r 64a64eb7a7f992eb407872bc638e15a55f1d23de -r e6686117fdf96bd02f71a4ea2ff06a5f77c14078 yt/utilities/_amr_utils/grid_traversal.pyx
--- a/yt/utilities/_amr_utils/grid_traversal.pyx
+++ b/yt/utilities/_amr_utils/grid_traversal.pyx
@@ -228,7 +228,7 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def __call__(self, PartitionedGrid pg):
+    def __call__(self, PartitionedGrid pg, int num_threads = 0):
         # This routine will iterate over all of the vectors and cast each in
         # turn.  Might benefit from a more sophisticated intersection check,
         # like http://courses.csusm.edu/cs697exz/ray_box.htm
@@ -261,12 +261,11 @@
         cdef np.float64_t width[3] 
         for i in range(3):
             width[i] = self.width[i]
-        with nogil, parallel():
-            idata = <ImageAccumulator *> malloc(sizeof(ImageAccumulator))
-            idata.supp_data = self.supp_data
-            v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-            v_dir = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-            if im.vd_strides[0] == -1:
+        if im.vd_strides[0] == -1:
+            with nogil, parallel():
+                idata = <ImageAccumulator *> malloc(sizeof(ImageAccumulator))
+                idata.supp_data = self.supp_data
+                v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
                 for j in prange(size, schedule="dynamic"):
                     vj = j % ny
                     vi = (j - vj) / ny + iter[0]
@@ -282,10 +281,17 @@
                     walk_volume(vc, v_pos, im.vp_dir, self.sampler,
                                 (<void *> idata))
                     for i in range(3): im.image[i + offset] = idata.rgba[i]
-            else:
+                free(idata)
+                free(v_pos)
+        else:
+            with nogil, parallel():
+                idata = <ImageAccumulator *> malloc(sizeof(ImageAccumulator))
+                idata.supp_data = self.supp_data
+                v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+                v_dir = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
                 # If we do not have a simple image plane, we have to cast all
                 # our rays 
-                for j in prange(size, schedule="dynamic"):
+                for j in prange(size, schedule="guided"):
                     offset = j * 3
                     for i in range(3): v_pos[i] = im.vp_pos[i + offset]
                     for i in range(3): v_dir[i] = im.vp_dir[i + offset]
@@ -293,9 +299,9 @@
                     walk_volume(vc, v_pos, v_dir, self.sampler, 
                                 (<void *> idata))
                     for i in range(3): im.image[i + offset] = idata.rgba[i]
-            free(v_dir)
-            free(idata)
-            free(v_pos)
+                free(v_dir)
+                free(idata)
+                free(v_pos)
         return hit
 
 cdef void projection_sampler(



https://bitbucket.org/yt_analysis/yt/changeset/7d6eaa802158/
changeset:   7d6eaa802158
branch:      yt
user:        samskillman
date:        2012-01-30 18:25:49
summary:     Taking out slope optimization, as the interpolation within a cell is not linear.
affected #:  1 file

diff -r e6686117fdf96bd02f71a4ea2ff06a5f77c14078 -r 7d6eaa8021589d8dad8315c4fa99d483f7afe3f7 yt/utilities/_amr_utils/grid_traversal.pyx
--- a/yt/utilities/_amr_utils/grid_traversal.pyx
+++ b/yt/utilities/_amr_utils/grid_traversal.pyx
@@ -360,23 +360,14 @@
         dp[i] -= index[i] * vc.dds[i] + vc.left_edge[i]
         dp[i] *= vc.idds[i]
         ds[i] = v_dir[i] * vc.idds[i] * dt
-    for i in range(vc.n_fields):
-        slopes[i] = offset_interpolate(vc.dims, dp,
-                        vc.data[i] + offset)
-    for i in range(3):
-        dp[i] += ds[i] * vri.n_samples
-    cdef np.float64_t temp
-    for i in range(vc.n_fields):
-        temp = slopes[i]
-        slopes[i] -= offset_interpolate(vc.dims, dp,
-                         vc.data[i] + offset)
-        slopes[i] *= -1.0/vri.n_samples
-        dvs[i] = temp
-    for dti in range(vri.n_samples): 
-        FIT_eval_transfer(dt, dvs, im.rgba, vri.n_fits, vri.fits,
-                          vri.field_table_ids)
-        for i in range(vc.n_fields):
-            dvs[i] += slopes[i]
+    for i in range(vri.n_samples):
+        for j in range(vc.n_fields):
+            dvs[j] = offset_interpolate(vc.dims, dp,
+                    vc.data[j] + offset)
+        FIT_eval_transfer(dt, dvs, im.rgba, vri.n_fits, 
+                vri.fits, vri.field_table_ids)
+        for j in range(3):
+            dp[j] += ds[j]
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
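The rationale in the summary ("the interpolation within a cell is not linear") is easy to verify outside of yt: along a ray, trilinear interpolation is cubic in the path parameter, so sampling only the endpoints and stepping by a fixed slope (the code removed above) misrepresents the interior values. A small, self-contained numpy check:

    import numpy as np

    def trilerp(c, p):
        # Trilinear interpolation of unit-cube corner values c[i,j,k] at point p.
        x, y, z = p
        return (c[0,0,0]*(1-x)*(1-y)*(1-z) + c[1,0,0]*x*(1-y)*(1-z) +
                c[0,1,0]*(1-x)*y*(1-z)     + c[0,0,1]*(1-x)*(1-y)*z +
                c[1,1,0]*x*y*(1-z)         + c[1,0,1]*x*(1-y)*z +
                c[0,1,1]*(1-x)*y*z         + c[1,1,1]*x*y*z)

    corners = np.zeros((2, 2, 2)); corners[1, 1, 1] = 1.0    # one "hot" corner
    t = np.linspace(0.0, 1.0, 5)
    samples = np.array([trilerp(corners, (ti, ti, ti)) for ti in t])  # equals t**3
    endpoint_slope = samples[0] + t * (samples[-1] - samples[0])
    print samples - endpoint_slope   # nonzero everywhere except the endpoints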



https://bitbucket.org/yt_analysis/yt/changeset/4fb894499f2d/
changeset:   4fb894499f2d
branch:      yt
user:        samskillman
date:        2012-01-31 16:57:59
summary:     Changing the alpha-blending technique to allow for more opaque transfer functions.  This is enabled by not clipping the transfer function to 1.0, and calculating the absorption using an exponential function rather than using exp(-x) =~ 1-x.  This also comes with changes to image reduction.
affected #:  3 files

diff -r 7d6eaa8021589d8dad8315c4fa99d483f7afe3f7 -r 4fb894499f2dbd21cfbee72cff58aaafdbb5045f yt/utilities/_amr_utils/field_interpolation_tables.pxd
--- a/yt/utilities/_amr_utils/field_interpolation_tables.pxd
+++ b/yt/utilities/_amr_utils/field_interpolation_tables.pxd
@@ -39,6 +39,9 @@
     int weight_table_id
     int nbins
 
+cdef extern from "math.h": 
+    double expf(double x) nogil 
+
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
@@ -88,9 +91,10 @@
         if fid != -1: istorage[i] *= istorage[fid]
     for i in range(6):
         trgba[i] = istorage[field_table_ids[i]]
+    ta = expf(-fmax(dt*(trgba[3] + trgba[4] + trgba[5]), 0.0))
     for i in range(3):
-        ta = fmax((1.0 - dt*trgba[i+3]), 0.0)
-        rgba[i] = dt*trgba[i] + ta * rgba[i]
+        # ta = fmax((1.0 - dt*trgba[i+3]), 0.0)
+        rgba[i] = dt*trgba[i+3] + ta * rgba[i]
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
@@ -114,7 +118,7 @@
         if fid != -1: istorage[i] *= istorage[fid]
     for i in range(6):
         trgba[i] = istorage[field_table_ids[i]]
+    ta = expf(-fmax(dt*(trgba[3] + trgba[4] + trgba[5]), 0.0))
     for i in range(3):
-        ta = fmax((1.0 - dt*trgba[i+3]), 0.0)
-        rgba[i] = dt*trgba[i] + ta * rgba[i] + dt*dot_prod*l_rgba[i]*trgba[i]*l_rgba[3] #(trgba[0]+trgba[1]+trgba[2])
+        rgba[i] = dt*trgba[i+3]*(1. + dot_prod*l_rgba[i]) + ta * rgba[i]
 


diff -r 7d6eaa8021589d8dad8315c4fa99d483f7afe3f7 -r 4fb894499f2dbd21cfbee72cff58aaafdbb5045f yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -1131,14 +1131,14 @@
             return
         self.image = image
 
-        viewpoint = front_center
+        viewpoint = front_center 
 
         for node in self.viewpoint_traverse(viewpoint):
             if node.grid is not None:
                 if node.brick is not None:
                     yield node.brick
          
-        self.reduce_tree_images(self.tree, front_center)
+        self.reduce_tree_images(self.tree, viewpoint)
         self.comm.barrier()
         
     def reduce_tree_images(self, tree, viewpoint, image=None):
@@ -1154,7 +1154,7 @@
             try:
                 my_node.left_child.owner = my_node.owner
                 my_node.right_child.owner = my_node.owner + 2**(rounds-(i+1))
-                if path[i+1] is '0': 
+                if path[i+1] == '0': 
                     my_node = my_node.left_child
                     my_node_id = my_node.id
                 else:
@@ -1163,10 +1163,11 @@
             except:
                 rounds = i-1
         for thisround in range(rounds,0,-1):
+            self.image[self.image>1.0]=1.0
             #print self.comm.rank, 'my node', my_node_id
             parent = my_node.parent
             #print parent['split_ax'], parent['split_pos']
-            if viewpoint[parent.split_ax] <= parent.split_pos:
+            if viewpoint[parent.split_ax] > parent.split_pos:
                 front = parent.right_child
                 back = parent.left_child
             else:
@@ -1181,15 +1182,12 @@
                     mylog.debug( '%04i receiving image from %04i'%(self.comm.rank,back.owner))
                     arr2 = self.comm.recv_array(back.owner, tag=back.owner).reshape(
                         (self.image.shape[0],self.image.shape[1],self.image.shape[2]))
+                    ta = na.exp(-na.sum(self.image,axis=2))
                     for i in range(3):
                         # This is the new way: alpha corresponds to opacity of a given
                         # slice.  Previously it was ill-defined, but represented some
                         # measure of emissivity.
-                        #print arr2.shape
-                        #                ta = (1.0 - arr2[:,:,i+3])
-                        ta = (1.0 - na.sum(self.image,axis=2))
-                        ta[ta<0.0] = 0.0 
-                        self.image[:,:,i  ] = self.image[:,:,i  ] + ta * arr2[:,:,i  ]
+                        self.image[:,:,i  ] = self.image[:,:,i  ] + (ta)*arr2[:,:,i  ]
                 else:
                     mylog.debug('Reducing image.  You have %i rounds to go in this binary tree' % thisround)
                     mylog.debug('%04i sending my image to %04i'%(self.comm.rank,back.owner))
@@ -1204,16 +1202,14 @@
                     mylog.debug('%04i receiving image from %04i'%(self.comm.rank,front.owner))
                     arr2 = self.comm.recv_array(front.owner, tag=front.owner).reshape(
                         (self.image.shape[0],self.image.shape[1],self.image.shape[2]))
+                    ta = na.exp(-na.sum(arr2,axis=2))
                     for i in range(3):
                         # This is the new way: alpha corresponds to opacity of a given
                         # slice.  Previously it was ill-defined, but represented some
                         # measure of emissivity.
                         # print arr2.shape
-                        # ta = (1.0 - arr2[:,:,i+3])
-                        ta = (1.0 - na.sum(arr2,axis=2))
-                        ta[ta<0.0] = 0.0 
                         self.image[:,:,i  ] = arr2[:,:,i  ] + ta * self.image[:,:,i  ]
-                        # image[:,:,i+3] = arr2[:,:,i+3] + ta * image[:,:,i+3]
+
             # Set parent owner to back owner
             # my_node = (my_node-1)>>1
             if self.comm.rank == my_node.parent.owner: 


diff -r 7d6eaa8021589d8dad8315c4fa99d483f7afe3f7 -r 4fb894499f2dbd21cfbee72cff58aaafdbb5045f yt/visualization/volume_rendering/transfer_functions.py
--- a/yt/visualization/volume_rendering/transfer_functions.py
+++ b/yt/visualization/volume_rendering/transfer_functions.py
@@ -89,7 +89,7 @@
         >>> tf.add_gaussian(-9.0, 0.01, 1.0)
         """
         vals = height * na.exp(-(self.x - location)**2.0/width)
-        self.y = na.clip(na.maximum(vals, self.y), 0.0, 1.0)
+        self.y = na.clip(na.maximum(vals, self.y), 0.0, na.inf)
 
     def add_line(self, start, stop):
         r"""Add a line between two points to the transmission function.
@@ -122,7 +122,7 @@
         # not satisfy our bounding box arguments
         vals = slope * (self.x - x0) + y0
         vals[~((self.x >= x0) & (self.x <= x1))] = 0.0
-        self.y = na.clip(na.maximum(vals, self.y), 0.0, 1.0)
+        self.y = na.clip(na.maximum(vals, self.y), 0.0, na.inf)
 
     def add_step(self, start, stop, value):
         r"""Adds a step function to the transfer function.
@@ -156,7 +156,7 @@
         """
         vals = na.zeros(self.x.shape, 'float64')
         vals[(self.x >= start) & (self.x <= stop)] = value
-        self.y = na.clip(na.maximum(vals, self.y), 0.0, 1.0)
+        self.y = na.clip(na.maximum(vals, self.y), 0.0, na.inf)
 
     def add_filtered_planck(self, wavelength, trans):
         vals = na.zeros(self.x.shape, 'float64')
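To make the summary's point concrete: for an optically thick sample the old transmission factor 1 - dt*alpha goes negative and had to be clipped to zero, while exp(-dt*alpha) stays in (0, 1], which is what allows transfer functions above 1.0 now that the clip is gone. A small comparison of just the two transmission formulas from the hunks above (not yt code):

    import numpy as np

    dt, alpha = 0.1, 20.0                   # one fairly opaque ray segment
    ta_old = max(1.0 - dt * alpha, 0.0)     # -> 0.0 after clipping: background lost entirely
    ta_new = np.exp(-dt * alpha)            # -> ~0.135: some background still transmitted
    background = 1.0
    print ta_old * background, ta_new * background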



https://bitbucket.org/yt_analysis/yt/changeset/446266338e1f/
changeset:   446266338e1f
branch:      yt
user:        samskillman
date:        2012-02-01 21:43:06
summary:     Adding ProjectionCamera, getting rid of a failed import.
affected #:  2 files

diff -r 4fb894499f2dbd21cfbee72cff58aaafdbb5045f -r 446266338e1f27c861a6ad4f54d2398a48d42de7 yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -69,8 +69,7 @@
 from .halo_profiler.api import \
     VirialFilter, \
     HaloProfiler, \
-    FakeProfile, \
-    shift_projections
+    FakeProfile
 
 from .hierarchy_subset.api import \
     ConstructedRootGrid, \


diff -r 4fb894499f2dbd21cfbee72cff58aaafdbb5045f -r 446266338e1f27c861a6ad4f54d2398a48d42de7 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -926,6 +926,7 @@
     # We manually modify the ProjectionTransferFunction to get it to work the
     # way we want, with a second field that's also passed through.
     fields = [field]
+    
     if weight is not None:
         # This is a temporary field, which we will remove at the end.
         def _make_wf(f, w):
@@ -937,7 +938,7 @@
             function=_make_wf(field, weight))
         fields = ["temp_weightfield", weight]
     image = na.zeros((resolution, resolution, 3), dtype='float64',
-                      order='F')
+                      order='C')
     normal_vector, north_vector, east_vector = ortho_find(normal_vector)
     unit_vectors = [north_vector, east_vector, normal_vector]
     back_center= center - 0.5*width * normal_vector
@@ -1103,3 +1104,84 @@
     canvas = matplotlib.backends.backend_agg.FigureCanvasAgg(fig)
     canvas.print_figure(fn)
     return img, count
+
+class ProjectionCamera(Camera):
+    def __init__(self, pf, center, normal_vector, width, resolution,
+            field, weight=None, volume=None, le=None, re=None,
+            north_vector=None):
+        Camera.__init__(self, center, normal_vector, width, resolution, None,
+                fields = field, pf=pf, volume=1,
+                le=le, re=re, north_vector=north_vector)
+        self.field = field
+        self.weight = weight
+        self.resolution = resolution
+
+    def snapshot(self):
+        fields = [self.field]
+        resolution = self.resolution
+        width = self.width[2]
+        pf = self.pf
+        if self.weight is not None:
+            # This is a temporary field, which we will remove at the end.
+            def _make_wf(f, w):
+                def temp_weightfield(a, b):
+                    tr = b[f].astype("float64") * b[w]
+                    return tr
+                return temp_weightfield
+            pf.field_info.add_field("temp_weightfield",
+                function=_make_wf(self.field, self.weight))
+            fields = ["temp_weightfield", self.weight]
+        image = na.zeros((resolution, resolution, 3), dtype='float64',
+                          order='C')
+
+        north_vector = self.unit_vectors[0]
+        east_vector = self.unit_vectors[1]
+        normal_vector = self.unit_vectors[2]
+
+        back_center= self.center - 0.5*width * normal_vector
+        rotp = na.concatenate([na.linalg.pinv(self.unit_vectors).ravel('F'),
+                               back_center])
+
+        sampler = ProjectionSampler(
+            rotp, normal_vector * width, back_center,
+            (-width/2, width/2, -width/2, width/2),
+            image, north_vector, east_vector,
+            na.array([width, width, width], dtype='float64'))
+        
+        # Calculate the eight corners of the box
+        # Back corners ...
+        mi = pf.domain_right_edge.copy()
+        ma = pf.domain_left_edge.copy()
+        for off1 in [-1, 1]:
+            for off2 in [-1, 1]:
+                for off3 in [-1, 1]:
+                    this_point = (self.center + width/2. * off1 * north_vector
+                                         + width/2. * off2 * east_vector
+                                         + width/2. * off3 * normal_vector)
+                    na.minimum(mi, this_point, mi)
+                    na.maximum(ma, this_point, ma)
+        # Now we have a bounding box.
+        grids = pf.h.region(self.center, mi, ma)._grids
+
+        pb = get_pbar("Sampling ", len(grids))
+        for i,grid in enumerate(grids):
+            data = [(grid[field] * grid.child_mask).astype("float64")
+                    for field in fields]
+            pg = PartitionedGrid(
+                grid.id, data,
+                grid.LeftEdge, grid.RightEdge, grid.ActiveDimensions.astype("int64"))
+            grid.clear_data()
+            sampler(pg)
+            pb.update(i)
+        pb.finish()
+        image = sampler.aimage
+        if self.weight is None:
+            dl = width * pf.units[pf.field_info[self.field].projection_conversion]
+            image *= dl
+        else:
+            image[:,:,0] /= image[:,:,1]
+            pf.field_info.pop("temp_weightfield")
+        return image[:,:,0]
+
+data_object_registry["projection_camera"] = ProjectionCamera
+
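A hedged sketch of using the new ProjectionCamera, assuming a loaded parameter file pf with a Density field and that Camera normalizes a scalar width as elsewhere in this file:

    from yt.visualization.volume_rendering.camera import ProjectionCamera

    L = [0.3, 0.5, 0.8]                                      # off-axis line of sight
    c = 0.5 * (pf.domain_left_edge + pf.domain_right_edge)
    cam = ProjectionCamera(pf, c, L, 1.0, 512, "Density", weight=None)
    proj = cam.snapshot()        # (512, 512) array; weighted if a weight field is given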



https://bitbucket.org/yt_analysis/yt/changeset/518773daaefa/
changeset:   518773daaefa
branch:      yt
user:        samskillman
date:        2012-03-20 21:03:35
summary:     Changing trgba[3,4,5] to [0,1,2] and [i+3] to [i].  I'm not quite sure why 3-5 are not working correctly, so this should be examined later.
affected #:  1 file

diff -r 446266338e1f27c861a6ad4f54d2398a48d42de7 -r 518773daaefaa8c4b10ed412bd038aa1e2a378da yt/utilities/_amr_utils/field_interpolation_tables.pxd
--- a/yt/utilities/_amr_utils/field_interpolation_tables.pxd
+++ b/yt/utilities/_amr_utils/field_interpolation_tables.pxd
@@ -91,10 +91,10 @@
         if fid != -1: istorage[i] *= istorage[fid]
     for i in range(6):
         trgba[i] = istorage[field_table_ids[i]]
-    ta = expf(-fmax(dt*(trgba[3] + trgba[4] + trgba[5]), 0.0))
+    ta = expf(-fmax(dt*(trgba[0] + trgba[1] + trgba[2]), 0.0))
     for i in range(3):
         # ta = fmax((1.0 - dt*trgba[i+3]), 0.0)
-        rgba[i] = dt*trgba[i+3] + ta * rgba[i]
+        rgba[i] = dt*trgba[i] + ta * rgba[i]
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
@@ -118,7 +118,7 @@
         if fid != -1: istorage[i] *= istorage[fid]
     for i in range(6):
         trgba[i] = istorage[field_table_ids[i]]
-    ta = expf(-fmax(dt*(trgba[3] + trgba[4] + trgba[5]), 0.0))
+    ta = expf(-fmax(dt*(trgba[0] + trgba[1] + trgba[2]), 0.0))
     for i in range(3):
-        rgba[i] = dt*trgba[i+3]*(1. + dot_prod*l_rgba[i]) + ta * rgba[i]
+        rgba[i] = dt*trgba[i]*(1. + dot_prod*l_rgba[i]) + ta * rgba[i]
 



https://bitbucket.org/yt_analysis/yt/changeset/9bee6357e300/
changeset:   9bee6357e300
branch:      yt
user:        MatthewTurk
date:        2012-04-03 22:58:30
summary:     Merging from the development tip.
affected #:  159 files
Diff too large to display.

https://bitbucket.org/yt_analysis/yt/changeset/9cf25cdabe8e/
changeset:   9cf25cdabe8e
branch:      yt
user:        MatthewTurk
date:        2012-04-03 23:21:45
summary:     Fix compile issue that I missed.
affected #:  1 file

diff -r 9bee6357e300c5d22993b8ed4fe3d46b3af1688d -r 9cf25cdabe8ea7b1eb8571ed70e4315b85c44573 yt/utilities/_amr_utils/QuadTree.pyx
--- a/yt/utilities/_amr_utils/QuadTree.pyx
+++ b/yt/utilities/_amr_utils/QuadTree.pyx
@@ -32,6 +32,7 @@
 
 from stdlib cimport malloc, free, abs
 from cython.operator cimport dereference as deref, preincrement as inc
+from fp_utils cimport fmax
 
 cdef extern from "stdlib.h":
     # NOTE that size_t might not be int



https://bitbucket.org/yt_analysis/yt/changeset/dfa80f7267ad/
changeset:   dfa80f7267ad
branch:      yt
user:        MatthewTurk
date:        2012-04-04 02:55:12
summary:     Add in num_threads support, pass it in through snapshot.  Requires Cython 0.16.
affected #:  2 files

diff -r 9cf25cdabe8ea7b1eb8571ed70e4315b85c44573 -r dfa80f7267ad0c40f9568a9a59f31988f655212a yt/utilities/_amr_utils/grid_traversal.pyx
--- a/yt/utilities/_amr_utils/grid_traversal.pyx
+++ b/yt/utilities/_amr_utils/grid_traversal.pyx
@@ -262,7 +262,7 @@
         for i in range(3):
             width[i] = self.width[i]
         if im.vd_strides[0] == -1:
-            with nogil, parallel():
+            with nogil, parallel(num_threads = num_threads):
                 idata = <ImageAccumulator *> malloc(sizeof(ImageAccumulator))
                 idata.supp_data = self.supp_data
                 v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))


diff -r 9cf25cdabe8ea7b1eb8571ed70e4315b85c44573 -r dfa80f7267ad0c40f9568a9a59f31988f655212a yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -333,7 +333,8 @@
                 self.unit_vectors[0],
                 self.unit_vectors[1])
 
-    def snapshot(self, fn = None, clip_ratio = None, double_check = False):
+    def snapshot(self, fn = None, clip_ratio = None, double_check = False,
+                 num_threads = 0):
         r"""Ray-cast the camera.
 
         This method instructs the camera to take a snapshot -- i.e., call the ray
@@ -386,7 +387,7 @@
                     if na.any(na.isnan(data)):
                         raise RuntimeError
         for brick in self.volume.traverse(self.back_center, self.front_center, image):
-            sampler(brick)
+            sampler(brick, num_threads = num_threads)
             total_cells += na.prod(brick.my_data[0].shape)
             pbar.update(total_cells)
         pbar.finish()
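With the hook above (and Cython 0.16, per the summary), the thread count is chosen per snapshot rather than per camera; for example, on an existing Camera cam:

    image = cam.snapshot("render.png", num_threads=8)   # pass the thread count down to the prange loops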



https://bitbucket.org/yt_analysis/yt/changeset/f8d6325f2b2c/
changeset:   f8d6325f2b2c
branch:      yt
user:        MatthewTurk
date:        2012-04-04 12:48:56
summary:     Make setup() an overridden function, so that it is cdef'd, and add num_threads
to off_axis_projection.
affected #:  2 files

diff -r dfa80f7267ad0c40f9568a9a59f31988f655212a -r f8d6325f2b2ccb7d55fead4900b7578ce385ef87 yt/utilities/_amr_utils/grid_traversal.pyx
--- a/yt/utilities/_amr_utils/grid_traversal.pyx
+++ b/yt/utilities/_amr_utils/grid_traversal.pyx
@@ -304,6 +304,9 @@
                 free(v_pos)
         return hit
 
+    cdef void setup(self, PartitionedGrid pg):
+        return
+
 cdef void projection_sampler(
                  VolumeContainer *vc, 
                  np.float64_t v_pos[3],
@@ -320,7 +323,7 @@
         im.rgba[i] += vc.data[i][di] * dl
 
 cdef class ProjectionSampler(ImageSampler):
-    def setup(self, PartitionedGrid pg):
+    cdef void setup(self, PartitionedGrid pg):
         self.sampler = projection_sampler
 
 cdef struct VolumeRenderAccumulator:
@@ -553,7 +556,7 @@
                 skdc = star_list[i]
                 self.trees[i] = skdc.tree
 
-    def setup(self, PartitionedGrid pg):
+    cdef void setup(self, PartitionedGrid pg):
         if self.trees == NULL:
             self.sampler = volume_render_sampler
         else:
@@ -616,7 +619,7 @@
             self.vra.field_table_ids[i] = tf_obj.field_table_ids[i]
         self.supp_data = <void *> self.vra
 
-    def setup(self, PartitionedGrid pg):
+    cdef void setup(self, PartitionedGrid pg):
         self.sampler = volume_render_gradient_sampler
 
     def __dealloc__(self):


diff -r dfa80f7267ad0c40f9568a9a59f31988f655212a -r f8d6325f2b2ccb7d55fead4900b7578ce385ef87 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -1305,7 +1305,7 @@
 
 
 def off_axis_projection(pf, center, normal_vector, width, resolution,
-                        field, weight = None):
+                        field, weight = None, num_threads = 0):
     r"""Project through a parameter file, off-axis, and return the image plane.
 
     This function will accept the necessary items to integrate through a volume
@@ -1596,7 +1596,7 @@
                 grid.id, data,
                 grid.LeftEdge, grid.RightEdge, grid.ActiveDimensions.astype("int64"))
             grid.clear_data()
-            sampler(pg)
+            sampler(pg, num_threads = num_threads)
             pb.update(i)
         pb.finish()
         image = sampler.aimage
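At the user level the same knob is now exposed on off_axis_projection; a hedged example assuming a loaded parameter file pf:

    from yt.visualization.volume_rendering.camera import off_axis_projection

    c = 0.5 * (pf.domain_left_edge + pf.domain_right_edge)
    image = off_axis_projection(pf, c, [0.2, 0.3, 0.5], 1.0, 512,
                                "Density", weight=None, num_threads=4)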



https://bitbucket.org/yt_analysis/yt/changeset/3f63f2257cf3/
changeset:   3f63f2257cf3
branch:      yt
user:        samskillman
date:        2012-04-24 19:45:48
summary:     Working on opaque transfer function, requiring a modification to the integration of the transfer function. Also added a map_to_colormap function to the TF, which directly maps a colormap to a range of values in the transfer function.
affected #:  3 files

diff -r 9cf25cdabe8ea7b1eb8571ed70e4315b85c44573 -r 3f63f2257cf3655779bbb98e0a1de115f2c64f03 yt/utilities/_amr_utils/field_interpolation_tables.pxd
--- a/yt/utilities/_amr_utils/field_interpolation_tables.pxd
+++ b/yt/utilities/_amr_utils/field_interpolation_tables.pxd
@@ -82,7 +82,7 @@
                             FieldInterpolationTable fits[6],
                             int field_table_ids[6]) nogil:
     cdef int i, fid, use
-    cdef np.float64_t ta, tf, istorage[6], trgba[6], dot_prod
+    cdef np.float64_t ta, tf, ttot, istorage[6], trgba[6], dot_prod
     for i in range(6): istorage[i] = 0.0
     for i in range(n_fits):
         istorage[i] = FIT_get_value(&fits[i], dvs)
@@ -91,10 +91,10 @@
         if fid != -1: istorage[i] *= istorage[fid]
     for i in range(6):
         trgba[i] = istorage[field_table_ids[i]]
-    ta = expf(-fmax(dt*(trgba[0] + trgba[1] + trgba[2]), 0.0))
+    ttot = trgba[0] + trgba[1] + trgba[2]
+    ta = expf(-dt*fmax(ttot, 0.0))
     for i in range(3):
-        # ta = fmax((1.0 - dt*trgba[i+3]), 0.0)
-        rgba[i] = dt*trgba[i] + ta * rgba[i]
+        rgba[i] = (1.0-ta)*trgba[i] + ta*rgba[i]
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
@@ -120,5 +120,5 @@
         trgba[i] = istorage[field_table_ids[i]]
     ta = expf(-fmax(dt*(trgba[0] + trgba[1] + trgba[2]), 0.0))
     for i in range(3):
-        rgba[i] = dt*trgba[i]*(1. + dot_prod*l_rgba[i]) + ta * rgba[i]
+        rgba[i] = (1.-ta)*trgba[i]*(1. + dot_prod*l_rgba[i]) + ta * rgba[i]
 


diff -r 9cf25cdabe8ea7b1eb8571ed70e4315b85c44573 -r 3f63f2257cf3655779bbb98e0a1de115f2c64f03 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -1131,7 +1131,7 @@
             return
         self.image = image
 
-        viewpoint = front_center 
+        viewpoint = front_center - back_center 
 
         for node in self.viewpoint_traverse(viewpoint):
             if node.grid is not None:


diff -r 9cf25cdabe8ea7b1eb8571ed70e4315b85c44573 -r 3f63f2257cf3655779bbb98e0a1de115f2c64f03 yt/visualization/volume_rendering/transfer_functions.py
--- a/yt/visualization/volume_rendering/transfer_functions.py
+++ b/yt/visualization/volume_rendering/transfer_functions.py
@@ -560,6 +560,20 @@
         mylog.debug("Adding gaussian at %s with width %s and colors %s" % (
                 v, w, (r,g,b,alpha)))
 
+    def map_to_colormap(self, mi, ma, scale=1.0, colormap="gist_stern"):
+        rel0 = int(self.nbins*(mi - self.x_bounds[0])/(self.x_bounds[1] -
+            self.x_bounds[0]))
+        rel1 = int(self.nbins*(ma - self.x_bounds[0])/(self.x_bounds[1] -
+            self.x_bounds[0]))
+        tomap = na.linspace(0.,1.,num=rel1-rel0)
+        cmap = get_cmap(colormap)
+        cc = cmap(tomap)*scale
+        
+        self.red.y[rel0:rel1]  = cc[:,0]
+        self.green.y[rel0:rel1]= cc[:,1]
+        self.blue.y[rel0:rel1] = cc[:,2]
+        self.alpha.y[rel0:rel1]= cc[:,3]
+
     def add_layers(self, N, w=None, mi=None, ma=None, alpha = None,
                    colormap="gist_stern", col_bounds = None):
         r"""Add a set of Gaussians based on an existing colormap.
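
Two hedged sketches related to this changeset.  First, the new integration step
in field_interpolation_tables.pxd blends by opacity rather than accumulating
additively; a plain NumPy restatement of that formula, with made-up sample
values, purely to show the update:

    import numpy as np

    def composite_step(rgba, trgba, dt):
        # ta is the transmission through this segment, computed from the
        # summed emissivities of the three color channels.
        ttot = trgba[0] + trgba[1] + trgba[2]
        ta = np.exp(-dt * max(ttot, 0.0))
        rgba[:3] = (1.0 - ta) * trgba[:3] + ta * rgba[:3]
        return rgba

    rgba = np.zeros(4)                        # accumulated sample
    trgba = np.array([0.2, 0.1, 0.05, 0.0])   # transfer-function sample
    print(composite_step(rgba, trgba, dt=0.01))

Second, an assumed usage of the new map_to_colormap method on a color transfer
function; the bounds and colormap below are illustrative only:

    from yt.visualization.volume_rendering.api import ColorTransferFunction

    tf = ColorTransferFunction((-30.0, -22.0))   # hypothetical log-density bounds
    # Paint the upper half of the range directly with a colormap, at half strength.
    tf.map_to_colormap(-26.0, -22.0, scale=0.5, colormap="gist_stern")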



https://bitbucket.org/yt_analysis/yt/changeset/ce884d57f024/
changeset:   ce884d57f024
branch:      yt
user:        samskillman
date:        2012-04-26 01:00:39
summary:     ft2build.h is in freetype_inc, not freetype_inc/freetype2.  I don't know if freetype2/ should be in that list at all... I think not, but will defer to others.
affected #:  1 file

diff -r 3f63f2257cf3655779bbb98e0a1de115f2c64f03 -r ce884d57f0241c41b0602330f6621d0a52d2db29 yt/utilities/_amr_utils/setup.py
--- a/yt/utilities/_amr_utils/setup.py
+++ b/yt/utilities/_amr_utils/setup.py
@@ -121,7 +121,7 @@
                 libraries=["m"], depends=["yt/utilities/_amr_utils/fp_utils.pxd"])
     config.add_extension("freetype_writer", 
                 ["yt/utilities/_amr_utils/freetype_writer.pyx"],
-                include_dirs = [os.path.join(freetype_inc, "freetype2")],
+                include_dirs = [freetype_inc,os.path.join(freetype_inc, "freetype2")],
                 library_dirs = [freetype_lib], libraries=["freetype"],
                 depends=["yt/utilities/_amr_utils/freetype_includes.h"])
     config.add_extension("geometry_utils", 



https://bitbucket.org/yt_analysis/yt/changeset/ccb6d90c2b06/
changeset:   ccb6d90c2b06
branch:      yt
user:        MatthewTurk
date:        2012-04-26 17:23:04
summary:     Merging from Sam
affected #:  4 files

diff -r f8d6325f2b2ccb7d55fead4900b7578ce385ef87 -r ccb6d90c2b06c1175f43912bfd0d2ace5c66dc7d yt/utilities/_amr_utils/field_interpolation_tables.pxd
--- a/yt/utilities/_amr_utils/field_interpolation_tables.pxd
+++ b/yt/utilities/_amr_utils/field_interpolation_tables.pxd
@@ -82,7 +82,7 @@
                             FieldInterpolationTable fits[6],
                             int field_table_ids[6]) nogil:
     cdef int i, fid, use
-    cdef np.float64_t ta, tf, istorage[6], trgba[6], dot_prod
+    cdef np.float64_t ta, tf, ttot, istorage[6], trgba[6], dot_prod
     for i in range(6): istorage[i] = 0.0
     for i in range(n_fits):
         istorage[i] = FIT_get_value(&fits[i], dvs)
@@ -91,10 +91,10 @@
         if fid != -1: istorage[i] *= istorage[fid]
     for i in range(6):
         trgba[i] = istorage[field_table_ids[i]]
-    ta = expf(-fmax(dt*(trgba[0] + trgba[1] + trgba[2]), 0.0))
+    ttot = trgba[0] + trgba[1] + trgba[2]
+    ta = expf(-dt*fmax(ttot, 0.0))
     for i in range(3):
-        # ta = fmax((1.0 - dt*trgba[i+3]), 0.0)
-        rgba[i] = dt*trgba[i] + ta * rgba[i]
+        rgba[i] = (1.0-ta)*trgba[i] + ta*rgba[i]
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
@@ -120,5 +120,5 @@
         trgba[i] = istorage[field_table_ids[i]]
     ta = expf(-fmax(dt*(trgba[0] + trgba[1] + trgba[2]), 0.0))
     for i in range(3):
-        rgba[i] = dt*trgba[i]*(1. + dot_prod*l_rgba[i]) + ta * rgba[i]
+        rgba[i] = (1.-ta)*trgba[i]*(1. + dot_prod*l_rgba[i]) + ta * rgba[i]
 


diff -r f8d6325f2b2ccb7d55fead4900b7578ce385ef87 -r ccb6d90c2b06c1175f43912bfd0d2ace5c66dc7d yt/utilities/_amr_utils/setup.py
--- a/yt/utilities/_amr_utils/setup.py
+++ b/yt/utilities/_amr_utils/setup.py
@@ -121,7 +121,7 @@
                 libraries=["m"], depends=["yt/utilities/_amr_utils/fp_utils.pxd"])
     config.add_extension("freetype_writer", 
                 ["yt/utilities/_amr_utils/freetype_writer.pyx"],
-                include_dirs = [os.path.join(freetype_inc, "freetype2")],
+                include_dirs = [freetype_inc,os.path.join(freetype_inc, "freetype2")],
                 library_dirs = [freetype_lib], libraries=["freetype"],
                 depends=["yt/utilities/_amr_utils/freetype_includes.h"])
     config.add_extension("geometry_utils", 


diff -r f8d6325f2b2ccb7d55fead4900b7578ce385ef87 -r ccb6d90c2b06c1175f43912bfd0d2ace5c66dc7d yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -1131,7 +1131,7 @@
             return
         self.image = image
 
-        viewpoint = front_center 
+        viewpoint = front_center - back_center 
 
         for node in self.viewpoint_traverse(viewpoint):
             if node.grid is not None:


diff -r f8d6325f2b2ccb7d55fead4900b7578ce385ef87 -r ccb6d90c2b06c1175f43912bfd0d2ace5c66dc7d yt/visualization/volume_rendering/transfer_functions.py
--- a/yt/visualization/volume_rendering/transfer_functions.py
+++ b/yt/visualization/volume_rendering/transfer_functions.py
@@ -560,6 +560,20 @@
         mylog.debug("Adding gaussian at %s with width %s and colors %s" % (
                 v, w, (r,g,b,alpha)))
 
+    def map_to_colormap(self, mi, ma, scale=1.0, colormap="gist_stern"):
+        rel0 = int(self.nbins*(mi - self.x_bounds[0])/(self.x_bounds[1] -
+            self.x_bounds[0]))
+        rel1 = int(self.nbins*(ma - self.x_bounds[0])/(self.x_bounds[1] -
+            self.x_bounds[0]))
+        tomap = na.linspace(0.,1.,num=rel1-rel0)
+        cmap = get_cmap(colormap)
+        cc = cmap(tomap)*scale
+        
+        self.red.y[rel0:rel1]  = cc[:,0]
+        self.green.y[rel0:rel1]= cc[:,1]
+        self.blue.y[rel0:rel1] = cc[:,2]
+        self.alpha.y[rel0:rel1]= cc[:,3]
+
     def add_layers(self, N, w=None, mi=None, ma=None, alpha = None,
                    colormap="gist_stern", col_bounds = None):
         r"""Add a set of Gaussians based on an existing colormap.



https://bitbucket.org/yt_analysis/yt/changeset/48d9836857da/
changeset:   48d9836857da
branch:      yt
user:        MatthewTurk
date:        2012-04-26 23:02:34
summary:     Merging from tip
affected #:  12 files

diff -r ccb6d90c2b06c1175f43912bfd0d2ace5c66dc7d -r 48d9836857da3044fa6cdb1756172ac916166547 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -42,6 +42,7 @@
 INST_SQLITE3=1  # Install a local version of SQLite3?
 INST_PYX=0      # Install PyX?  Sometimes PyX can be problematic without a
                 # working TeX installation.
+INST_0MQ=1      # Install 0mq (for IPython) and affiliated bindings?
 
 # If you've got YT some other place, set this to point to it.
 YT_DIR=""
@@ -238,6 +239,10 @@
 get_willwont ${INST_PYX}
 echo "be installing PyX"
 
+printf "%-15s = %s so I " "INST_0MQ" "${INST_0MQ}"
+get_willwont ${INST_0MQ}
+echo "be installing ZeroMQ"
+
 echo
 
 if [ -z "$HDF5_DIR" ]
@@ -268,7 +273,15 @@
 
 function do_exit
 {
-    echo "Failure.  Check ${LOG_FILE}."
+    echo "********************************************"
+    echo "        FAILURE REPORT:"
+    echo "********************************************"
+    echo
+    tail -n 10 ${LOG_FILE}
+    echo
+    echo "********************************************"
+    echo "********************************************"
+    echo "Failure.  Check ${LOG_FILE}.  The last 10 lines are above."
     exit 1
 }
 
@@ -357,20 +370,21 @@
 echo 'de3dd37f753614055dcfed910e9886e03688b8078492df3da94b1ec37be796030be93291cba09e8212fffd3e0a63b086902c3c25a996cf1439e15c5b16e014d9  numpy-1.6.1.tar.gz' > numpy-1.6.1.tar.gz.sha512
 echo '5ad681f99e75849a5ca6f439c7a19bb51abc73d121b50f4f8e4c0da42891950f30407f761a53f0fe51b370b1dbd4c4f5a480557cb2444c8c7c7d5412b328a474  sqlite-autoconf-3070500.tar.gz' > sqlite-autoconf-3070500.tar.gz.sha512
 echo 'edae735960279d92acf58e1f4095c6392a7c2059b8f1d2c46648fc608a0fb06b392db2d073f4973f5762c034ea66596e769b95b3d26ad963a086b9b2d09825f2  zlib-1.2.3.tar.bz2' > zlib-1.2.3.tar.bz2.sha512
+echo 'fb3cf421b2dc48c31956b3e3ee4ab6ebc743deec3bf626c2238a1996c8c51be87260bd6aa662793a1f0c34dcda9b3146763777bb162dfad6fec4ca7acc403b2e  zeromq-2.2.0.tar.gz' > zeromq-2.2.0.tar.gz.sha512
+echo 'd761b492352841cdc125d9f0c99ee6d6c435812472ea234728b7f0fb4ad1048e1eec9b399df2081fbc926566f333f7780fedd0ce23255a6633fe5c60ed15a6af  pyzmq-2.1.11.tar.gz' > pyzmq-2.1.11.tar.gz.sha512
+echo '57fa5e57dfb98154a42d2d477f29401c2260ae7ad3a8128a4098b42ee3b35c54367b1a3254bc76b9b3b14b4aab7c3e1135858f68abc5636daedf2f01f9b8a3cf  tornado-2.2.tar.gz' > tornado-2.2.tar.gz.sha512
 
 # Individual processes
-if [ -z "$HDF5_DIR" ]
-then
-    echo "Downloading HDF5"
-    get_enzotools hdf5-1.8.7.tar.gz
-fi
-
+[ -z "$HDF5_DIR" ] && get_enzotools hdf5-1.8.7.tar.gz
 [ $INST_ZLIB -eq 1 ] && get_enzotools zlib-1.2.3.tar.bz2 
 [ $INST_BZLIB -eq 1 ] && get_enzotools bzip2-1.0.5.tar.gz
 [ $INST_PNG -eq 1 ] && get_enzotools libpng-1.2.43.tar.gz
 [ $INST_FTYPE -eq 1 ] && get_enzotools freetype-2.4.4.tar.gz
 [ $INST_SQLITE3 -eq 1 ] && get_enzotools sqlite-autoconf-3070500.tar.gz
 [ $INST_PYX -eq 1 ] && get_enzotools PyX-0.11.1.tar.gz
+[ $INST_0MQ -eq 1 ] && get_enzotools zeromq-2.2.0.tar.gz
+[ $INST_0MQ -eq 1 ] && get_enzotools pyzmq-2.1.11.tar.gz
+[ $INST_0MQ -eq 1 ] && get_enzotools tornado-2.2.tar.gz
 get_enzotools Python-2.7.2.tgz
 get_enzotools numpy-1.6.1.tar.gz
 get_enzotools matplotlib-1.1.0.tar.gz
@@ -596,6 +610,25 @@
 [ -n "${OLD_LDFLAGS}" ] && export LDFLAGS=${OLD_LDFLAGS}
 [ -n "${OLD_CXXFLAGS}" ] && export CXXFLAGS=${OLD_CXXFLAGS}
 [ -n "${OLD_CFLAGS}" ] && export CFLAGS=${OLD_CFLAGS}
+
+# Now we do our IPython installation, which has two optional dependencies.
+if [ $INST_0MQ -eq 1 ]
+then
+    if [ ! -e zeromq-2.2.0/done ]
+    then
+        [ ! -e zeromq-2.2.0 ] && tar xfz zeromq-2.2.0.tar.gz
+        echo "Installing ZeroMQ"
+        cd zeromq-2.2.0
+        ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
+        touch done
+        cd ..
+    fi
+    do_setup_py pyzmq-2.1.11 --zmq=${DEST_DIR}
+    do_setup_py tornado-2.2
+fi
+
 do_setup_py ipython-0.12
 do_setup_py h5py-2.0.1
 do_setup_py Cython-0.15.1


diff -r ccb6d90c2b06c1175f43912bfd0d2ace5c66dc7d -r 48d9836857da3044fa6cdb1756172ac916166547 yt/analysis_modules/simulation_handler/enzo_simulation.py
--- a/yt/analysis_modules/simulation_handler/enzo_simulation.py
+++ b/yt/analysis_modules/simulation_handler/enzo_simulation.py
@@ -49,7 +49,8 @@
                  initial_redshift=None, final_redshift=None,
                  links=False, enzo_parameters=None, 
                  get_time_outputs=True, get_redshift_outputs=True, 
-                 get_available_data=False, get_data_by_force=False):
+                 get_available_data=False, get_data_by_force=False,
+                 parallel = True):
         r"""Initialize an Enzo Simulation object.
         
         initial_time : float
@@ -128,9 +129,9 @@
         self._get_all_outputs(brute_force=get_data_by_force)
 
         # Instantiate a TimeSeriesData object.
-        time_series_outputs = [load(output['filename']) \
-                                   for output in self.allOutputs]
-        TimeSeriesData.__init__(self, outputs=time_series_outputs)
+        time_series_outputs = [output['filename'] for output in self.allOutputs]
+        TimeSeriesData.__init__(self, outputs=time_series_outputs,
+                                parallel = parallel)
 
     def _calculate_redshift_dump_times(self):
         "Calculates time from redshift of redshift dumps."


diff -r ccb6d90c2b06c1175f43912bfd0d2ace5c66dc7d -r 48d9836857da3044fa6cdb1756172ac916166547 yt/analysis_modules/two_point_functions/two_point_functions.py
--- a/yt/analysis_modules/two_point_functions/two_point_functions.py
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py
@@ -23,6 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+import h5py
 from yt.mods import *
 #from yt.utilities.math_utils import *
 from yt.utilities.performance_counters import yt_counters, time_function


diff -r ccb6d90c2b06c1175f43912bfd0d2ace5c66dc7d -r 48d9836857da3044fa6cdb1756172ac916166547 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -827,7 +827,7 @@
         for field in temp_data.keys():
             self[field] = temp_data[field]
 
-    def to_frb(self, width, resolution, center = None):
+    def to_frb(self, width, resolution, center=None, height=None):
         r"""This function returns a FixedResolutionBuffer generated from this
         object.
 
@@ -842,6 +842,8 @@
             This can either be a floating point value, in the native domain
             units of the simulation, or a tuple of the (value, unit) style.
             This will be the width of the FRB.
+        height : height specifier
+            This will be the height of the FRB, by default it is equal to width.
         resolution : int or tuple of ints
             The number of pixels on a side of the final FRB.
         center : array-like of floats, optional
@@ -868,13 +870,18 @@
         if iterable(width):
             w, u = width
             width = w/self.pf[u]
+        if height is None:
+            height = width
+        elif iterable(height):
+            h, u = height
+            height = h/self.pf[u]
         if not iterable(resolution):
             resolution = (resolution, resolution)
         from yt.visualization.fixed_resolution import FixedResolutionBuffer
         xax = x_dict[self.axis]
         yax = y_dict[self.axis]
-        bounds = (center[xax] - width/2.0, center[xax] + width/2.0,
-                  center[yax] - width/2.0, center[yax] + width/2.0)
+        bounds = (center[xax] - width*0.5, center[xax] + width*0.5,
+                  center[yax] - height*0.5, center[yax] + height*0.5)
         frb = FixedResolutionBuffer(self, bounds, resolution)
         return frb
 
@@ -1274,7 +1281,7 @@
         return "%s/c%s_L%s" % \
             (self._top_node, cen_name, L_name)
 
-    def to_frb(self, width, resolution):
+    def to_frb(self, width, resolution, height=None):
         r"""This function returns an ObliqueFixedResolutionBuffer generated
         from this object.
 
@@ -1292,6 +1299,8 @@
             This can either be a floating point value, in the native domain
             units of the simulation, or a tuple of the (value, unit) style.
             This will be the width of the FRB.
+        height : height specifier
+            This will be the height of the FRB, by default it is equal to width.
         resolution : int or tuple of ints
             The number of pixels on a side of the final FRB.
 
@@ -1313,10 +1322,15 @@
         if iterable(width):
             w, u = width
             width = w/self.pf[u]
+        if height is None:
+            height = width
+        elif iterable(height):
+            h, u = height
+            height = h/self.pf[u]
         if not iterable(resolution):
             resolution = (resolution, resolution)
         from yt.visualization.fixed_resolution import ObliqueFixedResolutionBuffer
-        bounds = (-width/2.0, width/2.0, -width/2.0, width/2.0)
+        bounds = (-width/2.0, width/2.0, -height/2.0, height/2.0)
         frb = ObliqueFixedResolutionBuffer(self, bounds, resolution)
         return frb
 
@@ -3470,62 +3484,16 @@
         self._base_dx = (
               (self.pf.domain_right_edge - self.pf.domain_left_edge) /
                self.pf.domain_dimensions.astype("float64"))
-        self.global_endindex = None
         AMRCoveringGridBase.__init__(self, *args, **kwargs)
         self._final_start_index = self.global_startindex
 
     def _get_list_of_grids(self):
         if self._grids is not None: return
-        # Check for ill-behaved AMR schemes (Enzo) where we may have
-        # root-tile-boundary issues.  This is specific to the root tiles not
-        # allowing grids to cross them and also allowing > 1 level of
-        # difference between neighboring areas.
-        nz = 0
-        buf = 0.0
-        self.min_level = 0
-        dl = ((self.global_startindex.astype("float64") + 1)
-           / (self.pf.refine_by**self.level))
-        dr = ((self.global_startindex.astype("float64")
-              + self.ActiveDimensions - 1)
-           / (self.pf.refine_by**self.level))
-        if na.any(dl == na.rint(dl)) or na.any(dr == na.rint(dr)):
-            nz = 2 * self.pf.refine_by**self.level
-            buf = self._base_dx
-        if nz <= self.pf.refine_by**3: # delta level of 3
-            last_buf = [None,None,None]
-            count = 0
-            # Repeat until no more grids are covered (up to a delta level of 3)
-            while na.any(buf != last_buf) or count == 3:
-                cg = self.pf.h.covering_grid(self.level,
-                     self.left_edge - buf, self.ActiveDimensions + nz)
-                cg._use_pbar = False
-                count = cg.ActiveDimensions.prod()
-                for g in cg._grids:
-                    count -= cg._get_data_from_grid(g, [])
-                    if count <= 0:
-                        self.min_level = g.Level
-                        break
-                last_buf = buf
-                # Increase box by 2 cell widths at the min covering level
-                buf = 2*self._base_dx / self.pf.refine_by**self.min_level
-                nz += 4 * self.pf.refine_by**(self.level-self.min_level)
-                count += 1
-        else:
-            nz = buf = 0
-            self.min_level = 0
-        # This should not cost substantial additional time.
-        BLE = self.left_edge - buf
-        BRE = self.right_edge + buf
-        if na.any(BLE < self.pf.domain_left_edge) or \
-           na.any(BRE > self.pf.domain_right_edge):
-            grids,ind = self.pf.hierarchy.get_periodic_box_grids_below_level(
-                            BLE, BRE, self.level, self.min_level)
-        else:
-            grids,ind = self.pf.hierarchy.get_box_grids_below_level(
-                BLE, BRE, self.level,
-                min(self.level, self.min_level))
-        sort_ind = na.argsort(self.pf.h.grid_levels.ravel()[ind])
-        self._grids = self.pf.hierarchy.grids[ind][(sort_ind,)]
+        buffer = ((self.pf.domain_right_edge - self.pf.domain_left_edge)
+                 / self.pf.domain_dimensions).max()
+        AMRCoveringGridBase._get_list_of_grids(self, buffer)
+        # We reverse the order to ensure that coarse grids are first
+        self._grids = self._grids[::-1]
 
     def get_data(self, field=None):
         self._get_list_of_grids()
@@ -3541,11 +3509,11 @@
         # We jump-start our task here
         mylog.debug("Getting fields %s from %s possible grids",
                    fields_to_get, len(self._grids))
-        self._update_level_state(self.min_level, fields_to_get, initialize=True)
+        self._update_level_state(0, fields_to_get)
         if self._use_pbar: pbar = \
                 get_pbar('Searching grids for values ', len(self._grids))
         # The grids are assumed to be pre-sorted
-        last_level = self.min_level
+        last_level = 0
         for gi, grid in enumerate(self._grids):
             if self._use_pbar: pbar.update(gi)
             if grid.Level > last_level and grid.Level <= self.level:
@@ -3563,31 +3531,27 @@
                     raise KeyError(n_bad)
         if self._use_pbar: pbar.finish()
 
-    def _update_level_state(self, level, fields = None, initialize=False):
+    def _update_level_state(self, level, fields = None):
         dx = self._base_dx / self.pf.refine_by**level
         self.field_data['cdx'] = dx[0]
         self.field_data['cdy'] = dx[1]
         self.field_data['cdz'] = dx[2]
         LL = self.left_edge - self.pf.domain_left_edge
-        RL = self.right_edge - self.pf.domain_left_edge
         self._old_global_startindex = self.global_startindex
-        self._old_global_endindex = self.global_endindex
-        # We use one grid cell at LEAST, plus one buffer on all sides
-        self.global_startindex = na.floor(LL / dx).astype('int64') - 1
-        self.global_endindex = na.ceil(RL / dx).astype('int64') + 1
+        self.global_startindex = na.rint(LL / dx).astype('int64') - 1
         self.domain_width = na.rint((self.pf.domain_right_edge -
                     self.pf.domain_left_edge)/dx).astype('int64')
-        if (level == 0 or initialize) and self.level > 0:
-            idims = self.global_endindex - self.global_startindex
+        if level == 0 and self.level > 0:
+            # We use one grid cell at LEAST, plus one buffer on all sides
+            idims = na.rint((self.right_edge-self.left_edge)/dx).astype('int64') + 2
             fields = ensure_list(fields)
             for field in fields:
                 self.field_data[field] = na.zeros(idims,dtype='float64')-999
             self._cur_dims = idims.astype("int32")
-        elif (level == 0 or initialize) and self.level == 0:
+        elif level == 0 and self.level == 0:
             DLE = self.pf.domain_left_edge
             self.global_startindex = na.array(na.floor(LL/ dx), dtype='int64')
             idims = na.rint((self.right_edge-self.left_edge)/dx).astype('int64')
-            #idims = self.global_endindex - self.global_startindex
             fields = ensure_list(fields)
             for field in fields:
                 self.field_data[field] = na.zeros(idims,dtype='float64')-999
@@ -3596,16 +3560,15 @@
     def _refine(self, dlevel, fields):
         rf = float(self.pf.refine_by**dlevel)
 
-        input_left = (self._old_global_startindex + 0.5) * rf
-        input_right = (self._old_global_endindex - 0.5) * rf
-        output_left = self.global_startindex + 0.5
-        output_right = self.global_endindex - 0.5
-        output_dims = (output_right - output_left + 1).astype('int32')
+        input_left = (self._old_global_startindex + 0.5) * rf 
+        dx = na.fromiter((self['cd%s' % ax] for ax in 'xyz'), count=3, dtype='float64')
+        output_dims = na.rint((self.right_edge-self.left_edge)/dx+0.5).astype('int32') + 2
 
         self._cur_dims = output_dims
 
         for field in fields:
             output_field = na.zeros(output_dims, dtype="float64")
+            output_left = self.global_startindex + 0.5
             ghost_zone_interpolate(rf, self[field], input_left,
                                    output_field, output_left)
             self.field_data[field] = output_field
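
A brief, assumed usage of the new height keyword on to_frb from the hunks
above; the dataset, axis, field, and units are placeholders:

    from yt.mods import *

    pf = load("DD0010/DD0010")              # hypothetical dataset
    sl = pf.h.slice(2, 0.5)                 # z-axis slice through the midplane
    # Rectangular buffer: 20 kpc wide, 10 kpc tall, at 800x400 pixels.
    frb = sl.to_frb((20.0, 'kpc'), (800, 400), height=(10.0, 'kpc'))
    write_image(na.log10(frb["Density"]), "slice_density.png")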


diff -r ccb6d90c2b06c1175f43912bfd0d2ace5c66dc7d -r 48d9836857da3044fa6cdb1756172ac916166547 yt/data_objects/hierarchy.py
--- a/yt/data_objects/hierarchy.py
+++ b/yt/data_objects/hierarchy.py
@@ -326,9 +326,9 @@
             return None
 
         full_name = "%s/%s" % (node, name)
-        try:
+        if len(self._data_file[full_name].shape) > 0:
             return self._data_file[full_name][:]
-        except TypeError:
+        else:
             return self._data_file[full_name]
 
     def _close_data_file(self):
@@ -337,18 +337,6 @@
             del self._data_file
             self._data_file = None
 
-    def _deserialize_hierarchy(self, harray):
-        # THIS IS BROKEN AND NEEDS TO BE FIXED
-        mylog.debug("Cached entry found.")
-        self.gridDimensions[:] = harray[:,0:3]
-        self.gridStartIndices[:] = harray[:,3:6]
-        self.gridEndIndices[:] = harray[:,6:9]
-        self.gridLeftEdge[:] = harray[:,9:12]
-        self.gridRightEdge[:] = harray[:,12:15]
-        self.gridLevels[:] = harray[:,15:16]
-        self.gridTimes[:] = harray[:,16:17]
-        self.gridNumberOfParticles[:] = harray[:,17:18]
-
     def get_smallest_dx(self):
         """
         Returns (in code units) the smallest cell size in the simulation.


diff -r ccb6d90c2b06c1175f43912bfd0d2ace5c66dc7d -r 48d9836857da3044fa6cdb1756172ac916166547 yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -76,11 +76,10 @@
         raise AttributeError(attr)
 
 class TimeSeriesData(object):
-    def __init__(self, outputs = None, parallel = True):
-        if outputs is None: outputs = []
-        self.outputs = outputs
+    def __init__(self, outputs, parallel = True):
         self.tasks = AnalysisTaskProxy(self)
         self.params = TimeSeriesParametersContainer(self)
+        self._pre_outputs = outputs[:]
         for type_name in data_object_registry:
             setattr(self, type_name, functools.partial(
                 TimeSeriesDataObject, self, type_name))
@@ -88,29 +87,38 @@
 
     def __iter__(self):
         # We can make this fancier, but this works
-        return self.outputs.__iter__()
+        for o in self._pre_outputs:
+            if isinstance(o, types.StringTypes):
+                yield load(o)
+            else:
+                yield o
 
     def __getitem__(self, key):
         if isinstance(key, types.SliceType):
             if isinstance(key.start, types.FloatType):
                 return self.get_range(key.start, key.stop)
-        return self.outputs[key]
+            # This will return a sliced up object!
+            return TimeSeriesData(self._pre_outputs[key], self.parallel)
+        o = self._pre_outputs[key]
+        if isinstance(o, types.StringTypes):
+            o = load(o)
+        return o
         
-    def _insert(self, pf):
-        # We get handed an instantiated parameter file
-        # Here we'll figure out a couple things about it, and then stick it
-        # inside our list.
-        self.outputs.append(pf)
-        
-    def eval(self, tasks, obj=None):
-        tasks = ensure_list(tasks)
-        return_values = {}
+    def __len__(self):
+        return len(self._pre_outputs)
+
+    def piter(self, storage = None):
         if self.parallel == False:
             njobs = 1
         else:
             if self.parallel == True: njobs = -1
             else: njobs = self.parallel
-        for store, pf in parallel_objects(self.outputs, njobs, return_values):
+        return parallel_objects(self, njobs, storage)
+        
+    def eval(self, tasks, obj=None):
+        tasks = ensure_list(tasks)
+        return_values = {}
+        for store, pf in self.piter(return_values):
             store.result = []
             for task in tasks:
                 try:
@@ -132,23 +140,20 @@
 
     @classmethod
     def from_filenames(cls, filename_list, parallel = True):
-        outputs = []
-        for fn in filename_list:
-            outputs.append(load(fn))
-        obj = cls(outputs, parallel = parallel)
+        obj = cls(filename_list[:], parallel = parallel)
         return obj
 
     @classmethod
     def from_output_log(cls, output_log,
                         line_prefix = "DATASET WRITTEN",
                         parallel = True):
-        outputs = []
+        filenames = []
         for line in open(output_log):
             if not line.startswith(line_prefix): continue
             cut_line = line[len(line_prefix):].strip()
             fn = cut_line.split()[0]
-            outputs.append(load(fn))
-        obj = cls(outputs, parallel = parallel)
+            filenames.append(fn)
+        obj = cls(filenames, parallel = parallel)
         return obj
 
 class TimeSeriesQuantitiesContainer(object):
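
A hedged sketch of driving the lazily-loading time series after this change;
the glob pattern and field are placeholders:

    import glob
    from yt.mods import *
    from yt.data_objects.time_series import TimeSeriesData

    # Filenames are now kept as strings and only load()-ed when iterated over.
    fns = sorted(glob.glob("DD????/DD????"))
    ts = TimeSeriesData.from_filenames(fns, parallel=True)

    storage = {}
    for store, pf in ts.piter(storage):
        # Each output is loaded on demand, potentially on a different task.
        store.result = pf.h.find_max("Density")[0]
    print(storage)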


diff -r ccb6d90c2b06c1175f43912bfd0d2ace5c66dc7d -r 48d9836857da3044fa6cdb1756172ac916166547 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -614,6 +614,9 @@
     add_field(n, function=eval("_%s" % n), particle_type=True,
               convert_function=_convertSpecificAngularMomentum,
               units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
+    add_field(n + "KMSMPC", function=eval("_%s" % n), particle_type=True,
+              convert_function=_convertSpecificAngularMomentumKMSMPC,
+              units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
 
 def _ParticleAngularMomentum(field, data):
     return data["ParticleMass"] * data["ParticleSpecificAngularMomentum"]


diff -r ccb6d90c2b06c1175f43912bfd0d2ace5c66dc7d -r 48d9836857da3044fa6cdb1756172ac916166547 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -354,6 +354,11 @@
             f = h5py.File(self.hierarchy_filename[:-9] + "harrays")
         except:
             return False
+        hash = f["/"].attrs.get("hash", None)
+        if hash != self.parameter_file._hash():
+            mylog.info("Binary hierarchy does not match: recreating")
+            f.close()
+            return False
         self.grid_dimensions[:] = f["/ActiveDimensions"][:]
         self.grid_left_edge[:] = f["/LeftEdges"][:]
         self.grid_right_edge[:] = f["/RightEdges"][:]
@@ -390,6 +395,7 @@
             f = h5py.File(self.hierarchy_filename[:-9] + "harrays", "w")
         except IOError:
             return
+        f["/"].attrs["hash"] = self.parameter_file._hash()
         f.create_dataset("/LeftEdges", data=self.grid_left_edge)
         f.create_dataset("/RightEdges", data=self.grid_right_edge)
         parents, procs, levels = [], [], []
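
The hunk above invalidates the cached binary hierarchy when the stored hash no
longer matches the parameter file.  A generic sketch of the same
hash-validated-cache pattern with h5py, using made-up names (cache_fn,
current_hash):

    import h5py

    def open_cache(cache_fn, current_hash):
        # Reject the cache if it was written for a different parameter file.
        try:
            f = h5py.File(cache_fn, "r")
        except IOError:
            return None
        if f["/"].attrs.get("hash", None) != current_hash:
            f.close()
            return None
        return f

    def write_cache(cache_fn, current_hash, arrays):
        f = h5py.File(cache_fn, "w")
        f["/"].attrs["hash"] = current_hash
        for name, arr in arrays.items():
            f.create_dataset("/" + name, data=arr)
        f.close()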


diff -r ccb6d90c2b06c1175f43912bfd0d2ace5c66dc7d -r 48d9836857da3044fa6cdb1756172ac916166547 yt/frontends/orion/data_structures.py
--- a/yt/frontends/orion/data_structures.py
+++ b/yt/frontends/orion/data_structures.py
@@ -151,7 +151,7 @@
         if not os.path.exists(fn): return
         with open(fn, 'r') as f:
             lines = f.readlines()
-            self.num_stars = int(lines[0].strip())
+            self.num_stars = int(lines[0].strip()[0])
             for line in lines[1:]:
                 particle_position_x = float(line.split(' ')[1])
                 particle_position_y = float(line.split(' ')[2])


diff -r ccb6d90c2b06c1175f43912bfd0d2ace5c66dc7d -r 48d9836857da3044fa6cdb1756172ac916166547 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -343,7 +343,11 @@
     obj_ids = na.arange(len(objects))
 
     to_share = {}
-    for result_id, obj in zip(obj_ids, objects)[my_new_id::njobs]:
+    # If our objects object is slice-aware, like time series data objects are,
+    # this will prevent intermediate objects from being created.
+    oiter = itertools.izip(obj_ids[my_new_id::njobs],
+                           objects[my_new_id::njobs])
+    for result_id, obj in oiter:
         if storage is not None:
             rstore = ResultsStorage()
             rstore.result_id = result_id


diff -r ccb6d90c2b06c1175f43912bfd0d2ace5c66dc7d -r 48d9836857da3044fa6cdb1756172ac916166547 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -679,6 +679,7 @@
         self.circle_args = circle_args
         self.text = text
         self.text_args = text_args
+        if self.text_args is None: self.text_args = {}
 
     def __call__(self, plot):
         from matplotlib.patches import Circle
@@ -696,7 +697,7 @@
         cir = Circle((center_x, center_y), radius, **self.circle_args)
         plot._axes.add_patch(cir)
         if self.text is not None:
-            plot._axes.text(center_x, center_y, "%s" % halo.id,
+            plot._axes.text(center_x, center_y, self.text,
                             **self.text_args)
 
 class HopCircleCallback(PlotCallback):


diff -r ccb6d90c2b06c1175f43912bfd0d2ace5c66dc7d -r 48d9836857da3044fa6cdb1756172ac916166547 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -37,7 +37,7 @@
 from yt.visualization.image_writer import write_bitmap
 from yt.data_objects.data_containers import data_object_registry
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    ParallelAnalysisInterface
+    ParallelAnalysisInterface, ProcessorPool
 from yt.utilities.amr_kdtree.api import AMRKDTree
 from numpy import pi
 
@@ -71,7 +71,7 @@
             The vector between the camera position and the center.
         width : float or list of floats
             The current width of the image.  If a single float, the volume is
-            cubical, but if not, it is front/back, left/right, top/bottom.
+            cubical, but if not, it is left/right, top/bottom, front/back.
         resolution : int or list of ints
             The number of pixels in each direction.
         north_vector : array_like, optional
@@ -194,7 +194,7 @@
         self.resolution = resolution
         self.sub_samples = sub_samples
         if not iterable(width):
-            width = (width, width, width) # front/back, left/right, top/bottom
+            width = (width, width, width) # left/right, top/bottom, front/back 
         self.width = width
         self.center = center
         self.steady_north = steady_north
@@ -250,7 +250,7 @@
         east_vector = -na.cross(north_vector, normal_vector).ravel()
         east_vector /= na.sqrt(na.dot(east_vector, east_vector))
         self.normal_vector = normal_vector
-        self.unit_vectors = [north_vector, east_vector, normal_vector]
+        self.unit_vectors = [east_vector, north_vector, normal_vector]
         self.box_vectors = na.array([self.unit_vectors[0]*self.width[0],
                                      self.unit_vectors[1]*self.width[1],
                                      self.unit_vectors[2]*self.width[2]])
@@ -258,8 +258,8 @@
         self.origin = self.center - 0.5*self.width[0]*self.unit_vectors[0] \
                                   - 0.5*self.width[1]*self.unit_vectors[1] \
                                   - 0.5*self.width[2]*self.unit_vectors[2]
-        self.back_center = self.center - 0.5*self.width[0]*self.unit_vectors[2]
-        self.front_center = self.center + 0.5*self.width[0]*self.unit_vectors[2]
+        self.back_center = self.center - 0.5*self.width[2]*self.unit_vectors[2]
+        self.front_center = self.center + 0.5*self.width[2]*self.unit_vectors[2]
         self.inv_mat = na.linalg.pinv(self.unit_vectors)
 
     def look_at(self, new_center, north_vector = None):
@@ -292,7 +292,7 @@
             The new looking vector.
         width: float or array of floats, optional
             The new width.  Can be a single value W -> [W,W,W] or an
-            array [W1, W2, W3]
+            array [W1, W2, W3] (left/right, top/bottom, front/back)
         center: array_like, optional
             Specifies the new center.
         north_vector : array_like, optional
@@ -302,7 +302,7 @@
         if width is None:
             width = self.width
         if not iterable(width):
-            width = (width, width, width) # front/back, left/right, top/bottom
+            width = (width, width, width) # left/right, tom/bottom, front/back 
         self.width = width
         if center is not None:
             self.center = center
@@ -428,7 +428,7 @@
         """
         self.width = [w / factor for w in self.width]
         self._setup_normalized_vectors(
-                self.unit_vectors[2], self.unit_vectors[0])
+                self.unit_vectors[2], self.unit_vectors[1])
 
     def zoomin(self, final, n_steps, clip_ratio = None):
         r"""Loop over a zoomin and return snapshots along the way.
@@ -493,23 +493,23 @@
             if final_width is not None:
                 if not iterable(final_width):
                     width = na.array([final_width, final_width, final_width]) 
-                    # front/back, left/right, top/bottom
+                    # left/right, top/bottom, front/back 
                 if (self.center == 0.0).all():
                     self.center += (na.array(final) - self.center) / (10. * n_steps)
                 final_zoom = final_width/na.array(self.width)
                 dW = final_zoom**(1.0/n_steps)
             else:
-                dW = 1.0
+                dW = na.array([1.0,1.0,1.0])
             position_diff = (na.array(final)/self.center)*1.0
             dx = position_diff**(1.0/n_steps)
         else:
             if final_width is not None:
                 if not iterable(final_width):
                     width = na.array([final_width, final_width, final_width]) 
-                    # front/back, left/right, top/bottom
+                    # left/right, top/bottom, front/back
                 dW = (1.0*final_width-na.array(self.width))/n_steps
             else:
-                dW = 1.0
+                dW = na.array([0.0,0.0,0.0])
             dx = (na.array(final)-self.center)*1.0/n_steps
         for i in xrange(n_steps):
             if exponential:
@@ -1325,7 +1325,7 @@
         The vector between the camera position and the center.
     width : float or list of floats
         The current width of the image.  If a single float, the volume is
-        cubical, but if not, it is front/back, left/right, top/bottom.
+        cubical, but if not, it is left/right, top/bottom, front/back
     resolution : int or list of ints
         The number of pixels in each direction.
     field : string
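
With the width tuple now documented as (left/right, top/bottom, front/back), a
non-cubical camera might be set up as below; the dataset, bounds, and numbers
are illustrative only:

    from yt.mods import *
    from yt.visualization.volume_rendering.api import ColorTransferFunction

    pf = load("DD0010/DD0010")                    # hypothetical dataset
    c = 0.5 * (pf.domain_left_edge + pf.domain_right_edge)
    L = [1.0, 0.5, 0.2]                           # viewing normal
    W = [1.0, 0.5, 2.0]                           # left/right, top/bottom, front/back
    tf = ColorTransferFunction((-30.0, -22.0))
    tf.add_layers(4, w=0.02)
    cam = pf.h.camera(c, L, W, 512, tf)
    image = cam.snapshot("volume_render.png")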



https://bitbucket.org/yt_analysis/yt/changeset/b4db9a5b5704/
changeset:   b4db9a5b5704
branch:      yt
user:        samskillman
date:        2012-05-11 23:05:08
summary:     Intermediate commit: work on opaque rendering in parallel.  There is still a bug in back-to-front vs. front-to-back ordering.
affected #:  4 files

diff -r 48d9836857da3044fa6cdb1756172ac916166547 -r b4db9a5b5704d5870cb3ecfbbd81cc366162c18b yt/utilities/_amr_utils/field_interpolation_tables.pxd
--- a/yt/utilities/_amr_utils/field_interpolation_tables.pxd
+++ b/yt/utilities/_amr_utils/field_interpolation_tables.pxd
@@ -92,8 +92,9 @@
     for i in range(6):
         trgba[i] = istorage[field_table_ids[i]]
     ttot = trgba[0] + trgba[1] + trgba[2]
-    ta = expf(-dt*fmax(ttot, 0.0))
+    ta = expf(-fmax(dt*ttot, 0.0))
     for i in range(3):
+        #ta = expf(-dt*fmax(trgba[i], 0.0))
         rgba[i] = (1.0-ta)*trgba[i] + ta*rgba[i]
 
 @cython.boundscheck(False)


diff -r 48d9836857da3044fa6cdb1756172ac916166547 -r b4db9a5b5704d5870cb3ecfbbd81cc366162c18b yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -28,11 +28,13 @@
 import numpy as na
 from yt.funcs import *
 from yt.visualization.volume_rendering.grid_partitioner import HomogenizedVolume
+from yt.visualization.image_writer import write_image, write_bitmap
 from yt.utilities.amr_utils import kdtree_get_choices
 from yt.utilities._amr_utils.grid_traversal import PartitionedGrid
 from yt.utilities.performance_counters import yt_counters, time_function
 from yt.utilities.parallel_tools.parallel_analysis_interface \
     import ParallelAnalysisInterface 
+import matplotlib.pylab as pl
 from copy import deepcopy
 from yt.config import ytcfg
 from time import time
@@ -923,6 +925,7 @@
                     current = current.parent
             else:
                 current = current.parent
+        
         return current, previous
                 
     def count_volume(self):
@@ -994,7 +997,13 @@
         current_node.id = 0
         par_tree_depth = int(na.log2(self.comm.size))
         anprocs = 2**par_tree_depth
+
+        volume_partitioned = 0.0
+        pbar = get_pbar("Building kd-Tree", 1.0)
+
         while current_node is not None:
+            pbar.update(volume_partitioned)
+
             # If we don't have any grids, that means we are revisiting
             # a dividing node, and there is nothing to be done.
             try: ngrids = current_node.grids
@@ -1038,6 +1047,7 @@
 
                     # Else make a leaf node (brick container)
                     set_leaf(current_node, thisgrid, current_node.l_corner, current_node.r_corner)
+                    volume_partitioned += na.prod(current_node.r_corner-current_node.l_corner)
                     # print 'My single grid covers the rest of the volume, and I have no children'
                     current_node, previous_node = self.step_depth(current_node, previous_node)
                     continue
@@ -1054,6 +1064,9 @@
 
             # Step to the nest node in a depth-first traversal.
             current_node, previous_node = self.step_depth(current_node, previous_node)
+        
+        pbar.finish()
+
 
     def _get_choices(self, current_node):
         '''
@@ -1116,31 +1129,38 @@
             Position of the front center to which the traversal progresses.
         image: na.array
             Image plane to contain resulting ray cast.
-            
+
         Returns
         ----------
         None, but modifies the image array.
-        
+
         See Also
         ----------
         yt.visualization.volume_rendering.camera
-        
+
         """
         if self.tree is None: 
             print 'No KD Tree Exists'
             return
         self.image = image
 
-        viewpoint = front_center - back_center 
+        viewpoint = front_center - back_center
+        viewpoint = front_center 
+        print 'Moving from front_center to back_center:',front_center, back_center
 
         for node in self.viewpoint_traverse(viewpoint):
             if node.grid is not None:
                 if node.brick is not None:
+                    distl = (front_center - node.l_corner).min()
+                    distr = (front_center - node.r_corner).min()
+                    #print distl, distr
                     yield node.brick
-         
+
+        mylog.debug('About to enter reduce, my image has a max of %e' %
+                self.image.max())
         self.reduce_tree_images(self.tree, viewpoint)
         self.comm.barrier()
-        
+
     def reduce_tree_images(self, tree, viewpoint, image=None):
         if image is not None:
             self.image = image
@@ -1154,7 +1174,7 @@
             try:
                 my_node.left_child.owner = my_node.owner
                 my_node.right_child.owner = my_node.owner + 2**(rounds-(i+1))
-                if path[i+1] == '0': 
+                if path[i+1] == '0':
                     my_node = my_node.left_child
                     my_node_id = my_node.id
                 else:
@@ -1163,7 +1183,6 @@
             except:
                 rounds = i-1
         for thisround in range(rounds,0,-1):
-            self.image[self.image>1.0]=1.0
             #print self.comm.rank, 'my node', my_node_id
             parent = my_node.parent
             #print parent['split_ax'], parent['split_pos']
@@ -1172,10 +1191,12 @@
                 back = parent.left_child
             else:
                 front = parent.left_child
-                back = parent.right_child 
+                back = parent.right_child
+            print 'Combining', viewpoint, parent.split_ax, parent.split_pos
+            print front.l_corner, front.r_corner
+            print back.l_corner, back.r_corner
 
             # mylog.debug('front owner %i back owner %i parent owner %i'%( front.owner, back.owner, parent.owner))
-                
             # Send the images around
             if front.owner == self.comm.rank:
                 if front.owner == parent.owner:
@@ -1183,32 +1204,45 @@
                     arr2 = self.comm.recv_array(back.owner, tag=back.owner).reshape(
                         (self.image.shape[0],self.image.shape[1],self.image.shape[2]))
                     ta = na.exp(-na.sum(self.image,axis=2))
+                    mylog.debug('I am here!-----------!')
+                    write_bitmap(self.image, 'int_image.png')
+                    write_bitmap(arr2, 'int_arr2.png')
+                    write_image(ta.T, 'int_ta.png',cmap_name='gist_stern')
                     for i in range(3):
                         # This is the new way: alpha corresponds to opacity of a given
                         # slice.  Previously it was ill-defined, but represented some
                         # measure of emissivity.
-                        self.image[:,:,i  ] = self.image[:,:,i  ] + (ta)*arr2[:,:,i  ]
+                        #ta = na.exp(-self.image[:,:,i])
+                        self.image[:,:,i  ] = (1.0-ta)*self.image[:,:,i  ] + ta*arr2[:,:,i]
                 else:
                     mylog.debug('Reducing image.  You have %i rounds to go in this binary tree' % thisround)
-                    mylog.debug('%04i sending my image to %04i'%(self.comm.rank,back.owner))
+                    mylog.debug('%04i sending my image to %04i with max %e'%(self.comm.rank,back.owner, self.image.max()))
                     self.comm.send_array(self.image.ravel(), back.owner, tag=self.comm.rank)
-                
+
             if back.owner == self.comm.rank:
                 if front.owner == parent.owner:
-                    mylog.debug('%04i sending my image to %04i'%(self.comm.rank, front.owner))
+                    mylog.debug('%04i sending my image to %04i with max %e'%(self.comm.rank, front.owner,
+                                self.image.max()))
                     self.comm.send_array(self.image.ravel(), front.owner, tag=self.comm.rank)
                 else:
                     mylog.debug('Reducing image.  You have %i rounds to go in this binary tree' % thisround)
                     mylog.debug('%04i receiving image from %04i'%(self.comm.rank,front.owner))
                     arr2 = self.comm.recv_array(front.owner, tag=front.owner).reshape(
                         (self.image.shape[0],self.image.shape[1],self.image.shape[2]))
-                    ta = na.exp(-na.sum(arr2,axis=2))
+                    ta = na.exp(-na.sum(self.image,axis=2))
+                    write_bitmap(self.image, 'int_image.png')
+                    write_bitmap(arr2, 'int_arr2.png')
+                    write_image(ta.T, 'int_ta.png',cmap_name='gist_stern')
+
+                    mylog.debug('Reducing, ta shape = ' + str(ta.shape))
+                    mylog.debug('im max: %e arr2 max: %e, tamax: %e tamin: %e' %
+                            (self.image.max(), arr2.max(),ta.max(), ta.min()))
                     for i in range(3):
                         # This is the new way: alpha corresponds to opacity of a given
                         # slice.  Previously it was ill-defined, but represented some
                         # measure of emissivity.
                         # print arr2.shape
-                        self.image[:,:,i  ] = arr2[:,:,i  ] + ta * self.image[:,:,i  ]
+                        self.image[:,:,i  ] = (1.0-ta)*self.image[:,:,i  ] + ta*arr2[:,:,i]
 
             # Set parent owner to back owner
             # my_node = (my_node-1)>>1


diff -r 48d9836857da3044fa6cdb1756172ac916166547 -r b4db9a5b5704d5870cb3ecfbbd81cc366162c18b yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -116,7 +116,7 @@
     image = image.transpose().copy() # Have to make sure it's contiguous 
     au.write_png(image, fn)
 
-def write_bitmap(bitmap_array, filename, max_val = None):
+def write_bitmap(bitmap_array, filename, max_val = None, transpose=True):
     r"""Write out a bitmapped image directly to a PNG file.
 
     This accepts a three- or four-channel `bitmap_array`.  If the image is not
@@ -150,6 +150,9 @@
         s1, s2 = bitmap_array.shape[:2]
         alpha_channel = 255*na.ones((s1,s2,1), dtype='uint8')
         bitmap_array = na.concatenate([bitmap_array, alpha_channel], axis=-1)
+    if transpose:
+        for channel in range(bitmap_array.shape[2]):
+            bitmap_array[:,:,channel] = bitmap_array[:,:,channel].T
     au.write_png(bitmap_array.copy(), filename)
     return bitmap_array
 


diff -r 48d9836857da3044fa6cdb1756172ac916166547 -r b4db9a5b5704d5870cb3ecfbbd81cc366162c18b yt/visualization/volume_rendering/transfer_functions.py
--- a/yt/visualization/volume_rendering/transfer_functions.py
+++ b/yt/visualization/volume_rendering/transfer_functions.py
@@ -560,7 +560,8 @@
         mylog.debug("Adding gaussian at %s with width %s and colors %s" % (
                 v, w, (r,g,b,alpha)))
 
-    def map_to_colormap(self, mi, ma, scale=1.0, colormap="gist_stern"):
+    def map_to_colormap(self, mi, ma, scale=1.0, colormap="gist_stern",
+            scale_func=None):
         rel0 = int(self.nbins*(mi - self.x_bounds[0])/(self.x_bounds[1] -
             self.x_bounds[0]))
         rel1 = int(self.nbins*(ma - self.x_bounds[0])/(self.x_bounds[1] -
@@ -568,11 +569,15 @@
         tomap = na.linspace(0.,1.,num=rel1-rel0)
         cmap = get_cmap(colormap)
         cc = cmap(tomap)*scale
-        
-        self.red.y[rel0:rel1]  = cc[:,0]
-        self.green.y[rel0:rel1]= cc[:,1]
-        self.blue.y[rel0:rel1] = cc[:,2]
-        self.alpha.y[rel0:rel1]= cc[:,3]
+        if scale_func is None:
+            scale_mult = 1.0
+        else:
+            scale_mult = scale_func(tomap,0.0,1.0)
+        print scale_mult 
+        self.red.y[rel0:rel1]  = cc[:,0]*scale_mult
+        self.green.y[rel0:rel1]= cc[:,1]*scale_mult
+        self.blue.y[rel0:rel1] = cc[:,2]*scale_mult
+        self.alpha.y[rel0:rel1]= cc[:,3]*scale_mult
 
     def add_layers(self, N, w=None, mi=None, ma=None, alpha = None,
                    colormap="gist_stern", col_bounds = None):



https://bitbucket.org/yt_analysis/yt/changeset/93c78fbcc19c/
changeset:   93c78fbcc19c
branch:      yt
user:        samskillman
date:        2012-05-31 01:05:52
summary:     Quick-fixing the brick traversal for plane-parallel renders.  Before, it was operating correctly for perspective views but not plane-parallel.  This is a bit hacky.  MPI-parallel rendering is still broken for very opaque renders; I think this will require the re-introduction of an alpha channel.
affected #:  2 files

diff -r b4db9a5b5704d5870cb3ecfbbd81cc366162c18b -r 93c78fbcc19c3b3cd10d80ebe306244635fcba37 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -1145,7 +1145,7 @@
         self.image = image
 
         viewpoint = front_center - back_center
-        viewpoint = front_center 
+        viewpoint = back_center 
         print 'Moving from front_center to back_center:',front_center, back_center
 
         for node in self.viewpoint_traverse(viewpoint):
@@ -1204,16 +1204,11 @@
                     arr2 = self.comm.recv_array(back.owner, tag=back.owner).reshape(
                         (self.image.shape[0],self.image.shape[1],self.image.shape[2]))
                     ta = na.exp(-na.sum(self.image,axis=2))
-                    mylog.debug('I am here!-----------!')
-                    write_bitmap(self.image, 'int_image.png')
-                    write_bitmap(arr2, 'int_arr2.png')
-                    write_image(ta.T, 'int_ta.png',cmap_name='gist_stern')
                     for i in range(3):
                         # This is the new way: alpha corresponds to opacity of a given
                         # slice.  Previously it was ill-defined, but represented some
                         # measure of emissivity.
-                        #ta = na.exp(-self.image[:,:,i])
-                        self.image[:,:,i  ] = (1.0-ta)*self.image[:,:,i  ] + ta*arr2[:,:,i]
+                        self.image[:,:,i  ] = self.image[:,:,i  ] + ta*arr2[:,:,i]
                 else:
                     mylog.debug('Reducing image.  You have %i rounds to go in this binary tree' % thisround)
                     mylog.debug('%04i sending my image to %04i with max %e'%(self.comm.rank,back.owner, self.image.max()))
@@ -1229,20 +1224,12 @@
                     mylog.debug('%04i receiving image from %04i'%(self.comm.rank,front.owner))
                     arr2 = self.comm.recv_array(front.owner, tag=front.owner).reshape(
                         (self.image.shape[0],self.image.shape[1],self.image.shape[2]))
-                    ta = na.exp(-na.sum(self.image,axis=2))
-                    write_bitmap(self.image, 'int_image.png')
-                    write_bitmap(arr2, 'int_arr2.png')
-                    write_image(ta.T, 'int_ta.png',cmap_name='gist_stern')
-
-                    mylog.debug('Reducing, ta shape = ' + str(ta.shape))
-                    mylog.debug('im max: %e arr2 max: %e, tamax: %e tamin: %e' %
-                            (self.image.max(), arr2.max(),ta.max(), ta.min()))
+                    ta = na.exp(-na.sum(arr2,axis=2))
                     for i in range(3):
                         # This is the new way: alpha corresponds to opacity of a given
                         # slice.  Previously it was ill-defined, but represented some
                         # measure of emissivity.
-                        # print arr2.shape
-                        self.image[:,:,i  ] = (1.0-ta)*self.image[:,:,i  ] + ta*arr2[:,:,i]
+                        self.image[:,:,i  ] = ta*self.image[:,:,i  ] + arr2[:,:,i]
 
             # Set parent owner to back owner
             # my_node = (my_node-1)>>1


diff -r b4db9a5b5704d5870cb3ecfbbd81cc366162c18b -r 93c78fbcc19c3b3cd10d80ebe306244635fcba37 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -386,7 +386,8 @@
                 for data in brick.my_data:
                     if na.any(na.isnan(data)):
                         raise RuntimeError
-        for brick in self.volume.traverse(self.back_center, self.front_center, image):
+        view_pos = self.front_center + self.unit_vectors[2] * 1.0e6 * self.width[2]
+        for brick in self.volume.traverse(view_pos, self.front_center, image):
             sampler(brick, num_threads = num_threads)
             total_cells += na.prod(brick.my_data[0].shape)
             pbar.update(total_cells)



https://bitbucket.org/yt_analysis/yt/changeset/7a6429b90ad1/
changeset:   7a6429b90ad1
branch:      yt
user:        samskillman
date:        2012-05-31 17:21:21
summary:     Not all volumes are equal to 1.
affected #:  1 file

diff -r 93c78fbcc19c3b3cd10d80ebe306244635fcba37 -r 7a6429b90ad1a9d85e8fbc0deb54b7e6137fb72b yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -999,7 +999,8 @@
         anprocs = 2**par_tree_depth
 
         volume_partitioned = 0.0
-        pbar = get_pbar("Building kd-Tree", 1.0)
+        pbar = get_pbar("Building kd-Tree",
+                na.prod(self.domain_right_edge-self.domain_left_edge))
 
         while current_node is not None:
             pbar.update(volume_partitioned)



https://bitbucket.org/yt_analysis/yt/changeset/3d106010de5c/
changeset:   3d106010de5c
branch:      yt
user:        samskillman
date:        2012-06-04 17:18:56
summary:     Reverting expf(-x) to (1.-x). It does not change much, and it is a lot cheaper.
affected #:  2 files

diff -r 7a6429b90ad1a9d85e8fbc0deb54b7e6137fb72b -r 3d106010de5cc1465a9df80ef6c8c57a456787a7 yt/utilities/_amr_utils/field_interpolation_tables.pxd
--- a/yt/utilities/_amr_utils/field_interpolation_tables.pxd
+++ b/yt/utilities/_amr_utils/field_interpolation_tables.pxd
@@ -92,9 +92,9 @@
     for i in range(6):
         trgba[i] = istorage[field_table_ids[i]]
     ttot = trgba[0] + trgba[1] + trgba[2]
-    ta = expf(-fmax(dt*ttot, 0.0))
+    ta = fmax(1.0 - dt*ttot, 0.0)
     for i in range(3):
-        #ta = expf(-dt*fmax(trgba[i], 0.0))
+        #ta = 1.0-dt*fmax(trgba[i], 0.0))
         rgba[i] = (1.0-ta)*trgba[i] + ta*rgba[i]
 
 @cython.boundscheck(False)
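
A quick NumPy comparison (a sketch, not yt code) of the two per-sample transmittance forms swapped above, expf(-dt*ttot) and the cheaper clamped linear form fmax(1 - dt*ttot, 0). For the small optical depths of a single sample they agree to first order, which is why the revert "does not change much".

import numpy as na

dt_ttot = na.linspace(0.0, 0.5, 6)            # optical depth of a single sample
ta_exp = na.exp(-dt_ttot)                     # exact transmittance
ta_lin = na.maximum(1.0 - dt_ttot, 0.0)       # cheaper clamped linear form

# exp(-x) = 1 - x + O(x^2), so for small per-sample optical depths the two
# agree closely; the largest gap on this range is about 0.11 at x = 0.5.
print(na.abs(ta_exp - ta_lin).max())
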


diff -r 7a6429b90ad1a9d85e8fbc0deb54b7e6137fb72b -r 3d106010de5cc1465a9df80ef6c8c57a456787a7 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -1204,12 +1204,13 @@
                     mylog.debug( '%04i receiving image from %04i'%(self.comm.rank,back.owner))
                     arr2 = self.comm.recv_array(back.owner, tag=back.owner).reshape(
                         (self.image.shape[0],self.image.shape[1],self.image.shape[2]))
-                    ta = na.exp(-na.sum(self.image,axis=2))
+                    ta = 1.0 - na.sum(self.image,axis=2)
+                    ta[ta<0.0] = 0.0
                     for i in range(3):
                         # This is the new way: alpha corresponds to opacity of a given
                         # slice.  Previously it was ill-defined, but represented some
                         # measure of emissivity.
-                        self.image[:,:,i  ] = self.image[:,:,i  ] + ta*arr2[:,:,i]
+                        self.image[:,:,i  ] = self.image[:,:,i] + ta*arr2[:,:,i]
                 else:
                     mylog.debug('Reducing image.  You have %i rounds to go in this binary tree' % thisround)
                     mylog.debug('%04i sending my image to %04i with max %e'%(self.comm.rank,back.owner, self.image.max()))
@@ -1225,7 +1226,9 @@
                     mylog.debug('%04i receiving image from %04i'%(self.comm.rank,front.owner))
                     arr2 = self.comm.recv_array(front.owner, tag=front.owner).reshape(
                         (self.image.shape[0],self.image.shape[1],self.image.shape[2]))
-                    ta = na.exp(-na.sum(arr2,axis=2))
+                    #ta = na.exp(-na.sum(arr2,axis=2))
+                    ta = 1.0 - na.sum(arr2, axis=2)
+                    ta[ta<0.0] = 0.0
                     for i in range(3):
                         # This is the new way: alpha corresponds to opacity of a given
                         # slice.  Previously it was ill-defined, but represented some



https://bitbucket.org/yt_analysis/yt/changeset/b96f84709582/
changeset:   b96f84709582
branch:      yt
user:        samskillman
date:        2012-06-05 00:03:14
summary:     Intermediate work on refactoring the camera: Camera and FisheyeCamera are working; most of the rest are broken.
affected #:  3 files

diff -r 3d106010de5cc1465a9df80ef6c8c57a456787a7 -r b96f847095825c21a2f7a529527d3c6de9cfeb27 yt/utilities/_amr_utils/grid_traversal.pyx
--- a/yt/utilities/_amr_utils/grid_traversal.pyx
+++ b/yt/utilities/_amr_utils/grid_traversal.pyx
@@ -1019,3 +1019,38 @@
             healpix_interface.vec2pix_nest(nside, v0, &ipix)
             #print "Rotated", v0[0], v0[1], v0[2], v1[0], v1[1], v1[2], ipix, pix_image[ipix]
             image[j, i] = pix_image[ipix]
+
+def arr_fisheye_vectors(int resolution, np.float64_t fov, int nimx=1, int
+        nimy=1, int nimi=0, int nimj=0, np.float64_t off_theta=0.0, np.float64_t
+        off_phi=0.0):
+    # We now follow figures 4-7 of:
+    # http://paulbourke.net/miscellaneous/domefisheye/fisheye/
+    # ...but all in Cython.
+    cdef np.ndarray[np.float64_t, ndim=3] vp
+    cdef int i, j, k
+    cdef np.float64_t r, phi, theta, px, py
+    cdef np.float64_t pi = 3.1415926
+    cdef np.float64_t fov_rad = fov * pi / 180.0
+    cdef int nx = resolution/nimx
+    cdef int ny = resolution/nimy
+    vp = np.zeros((nx,ny, 3), dtype="float64")
+    for i in range(nx):
+        px = 2.0 * (nimi*nx + i) / (resolution) - 1.0
+        for j in range(ny):
+            py = 2.0 * (nimj*ny + j) / (resolution) - 1.0
+            r = (px*px + py*py)**0.5
+            if r == 0.0:
+                phi = 0.0
+            elif px < 0:
+                phi = pi - asin(py / r)
+            else:
+                phi = asin(py / r)
+            theta = r * fov_rad / 2.0
+            theta += off_theta
+            phi += off_phi
+            vp[i,j,0] = sin(theta) * cos(phi)
+            vp[i,j,1] = sin(theta) * sin(phi)
+            vp[i,j,2] = cos(theta)
+    return vp
+
+
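
A vectorized NumPy sketch of the mapping arr_fisheye_vectors implements above (pixel coordinates to unit view vectors, following the Bourke fisheye figures). The function name is made up, and the mosaic offsets (nimx/nimy/nimi/nimj, off_theta/off_phi) are omitted for brevity.

import numpy as na

def fisheye_vectors(resolution, fov_degrees):
    """Return an (R, R, 3) array of unit view vectors for a fisheye image."""
    fov = fov_degrees * na.pi / 180.0
    px, py = na.mgrid[-1.0:1.0:resolution * 1j, -1.0:1.0:resolution * 1j]
    r = na.sqrt(px * px + py * py)
    safe_r = na.where(r == 0.0, 1.0, r)        # avoid 0/0 at the image center
    s = py / safe_r
    # Mirror the azimuth into the left half-plane where px < 0, as above.
    phi = na.where(px < 0, na.pi - na.arcsin(s), na.arcsin(s))
    theta = r * fov / 2.0                      # polar angle grows with pixel radius
    vp = na.empty((resolution, resolution, 3))
    vp[..., 0] = na.sin(theta) * na.cos(phi)
    vp[..., 1] = na.sin(theta) * na.sin(phi)
    vp[..., 2] = na.cos(theta)
    return vp

vp = fisheye_vectors(64, 180.0)                # a hemispherical dome
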


diff -r 3d106010de5cc1465a9df80ef6c8c57a456787a7 -r b96f847095825c21a2f7a529527d3c6de9cfeb27 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -1145,16 +1145,12 @@
             return
         self.image = image
 
-        viewpoint = front_center - back_center
         viewpoint = back_center 
         print 'Moving from front_center to back_center:',front_center, back_center
 
         for node in self.viewpoint_traverse(viewpoint):
             if node.grid is not None:
                 if node.brick is not None:
-                    distl = (front_center - node.l_corner).min()
-                    distr = (front_center - node.r_corner).min()
-                    #print distl, distr
                     yield node.brick
 
         mylog.debug('About to enter reduce, my image has a max of %e' %


diff -r 3d106010de5cc1465a9df80ef6c8c57a456787a7 -r b96f847095825c21a2f7a529527d3c6de9cfeb27 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -45,7 +45,7 @@
     PartitionedGrid, ProjectionSampler, VolumeRenderSampler, \
     LightSourceRenderSampler, \
     arr_vec2pix_nest, arr_pix2vec_nest, arr_ang2pix_nest, \
-    pixelize_healpix
+    pixelize_healpix, arr_fisheye_vectors
 
 class Camera(ParallelAnalysisInterface):
     def __init__(self, center, normal_vector, width,
@@ -311,27 +311,62 @@
         if normal_vector is None:
             normal_vector = self.front_center-self.center
         self._setup_normalized_vectors(normal_vector, north_vector)
+
+    def new_image(self):
+        image = na.zeros((self.resolution[0], self.resolution[1], 3), dtype='float64', order='C')
+        return image
         
-    def get_vector_plane(self, image):
-        # We should move away from pre-generation of vectors like this and into
-        # the usage of on-the-fly generation in the VolumeIntegrator module
-        # We might have a different width and back_center
-        px = na.linspace(-self.width[0]/2.0, self.width[0]/2.0,
-                         self.resolution[0])[:,None]
-        py = na.linspace(-self.width[1]/2.0, self.width[1]/2.0,
-                         self.resolution[1])[None,:]
-        inv_mat = self.inv_mat
-        bc = self.back_center
-        positions = na.zeros((self.resolution[0], self.resolution[1], 3),
-                          dtype='float64', order='C')
-        positions[:,:,0] = inv_mat[0,0]*px+inv_mat[0,1]*py+self.back_center[0]
-        positions[:,:,1] = inv_mat[1,0]*px+inv_mat[1,1]*py+self.back_center[1]
-        positions[:,:,2] = inv_mat[2,0]*px+inv_mat[2,1]*py+self.back_center[2]
-        bounds = (px.min(), px.max(), py.min(), py.max())
-        return (positions, self.box_vectors[2],
-                self.back_center, bounds, image,
-                self.unit_vectors[0],
-                self.unit_vectors[1])
+    def get_sampler_args(self, image):
+        rotp = na.concatenate([self.inv_mat.ravel('F'), self.back_center.ravel()])
+        args = (rotp, self.box_vectors[2], self.back_center,
+                (-self.width[0]/2.0, self.width[0]/2.0,
+                 -self.width[1]/2.0, self.width[1]/2.0),
+                image, self.unit_vectors[0], self.unit_vectors[1],
+                na.array(self.width),
+                self.transfer_function, self.sub_samples)
+        return args
+
+    def get_sampler(self, args):
+        if self.use_light:
+            if self.light_dir is None:
+                self.set_default_light_dir()
+            temp_dir = na.empty(3,dtype='float64')
+            temp_dir = self.light_dir[0] * self.unit_vectors[1] + \
+                    self.light_dir[1] * self.unit_vectors[2] + \
+                    self.light_dir[2] * self.unit_vectors[0]
+            if self.light_rgba is None:
+                self.set_default_light_rgba()
+            sampler = LightSourceRenderSampler(*args, light_dir=temp_dir,
+                    light_rgba=self.light_rgba)
+        else:
+            sampler = VolumeRenderSampler(*args)
+        return sampler
+
+    def _render(self, double_check, num_threads, image, na, sampler):
+        pbar = get_pbar("Ray casting", (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
+        total_cells = 0
+        if double_check:
+            for brick in self.volume.bricks:
+                for data in brick.my_data:
+                    if na.any(na.isnan(data)):
+                        raise RuntimeError
+        
+        view_pos = self.front_center + self.unit_vectors[2] * 1.0e6 * self.width[2]
+        for brick in self.volume.traverse(view_pos, self.front_center, image):
+            sampler(brick, num_threads=num_threads)
+            total_cells += na.prod(brick.my_data[0].shape)
+            pbar.update(total_cells)
+        
+        pbar.finish()
+        image = sampler.aimage
+        return image
+
+    def save_image(self, fn, clip_ratio, image):
+        if self.comm.rank is 0 and fn is not None:
+            if clip_ratio is not None:
+                write_bitmap(image, fn, clip_ratio * image.std())
+            else:
+                write_bitmap(image, fn)
 
     def snapshot(self, fn = None, clip_ratio = None, double_check = False,
                  num_threads = 0):
@@ -354,51 +389,17 @@
         image : array
             An (N,M,3) array of the final returned values, in float64 form.
         """
-        image = na.zeros((self.resolution[0], self.resolution[1], 3),
-                         dtype='float64', order='C')
-        rotp = na.concatenate([self.inv_mat.ravel('F'), self.back_center.ravel()])
-        args = (rotp, self.box_vectors[2], self.back_center,
-                (-self.width[0]/2.0, self.width[0]/2.0,
-                 -self.width[1]/2.0, self.width[1]/2.0),
-                image, self.unit_vectors[0], self.unit_vectors[1],
-                na.array(self.width),
-                self.transfer_function, self.sub_samples)
-        if self.use_light:
-            if self.light_dir is None:
-                self.set_default_light_dir()
-            temp_dir = na.empty(3,dtype='float64')
-            temp_dir = self.light_dir[0] * self.unit_vectors[1] + \
-                    self.light_dir[1] * self.unit_vectors[2] + \
-                    self.light_dir[2] * self.unit_vectors[0]
-            if self.light_rgba is None:
-                self.set_default_light_rgba()
-            sampler = LightSourceRenderSampler(*args, light_dir=temp_dir,
-                    light_rgba=self.light_rgba)
-        else:
-            sampler = VolumeRenderSampler(*args)
+        image = self.new_image()
+
+        args = self.get_sampler_args(image)
+
+        sampler = self.get_sampler(args)
+
         self.volume.initialize_source()
 
-        pbar = get_pbar("Ray casting",
-                        (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
-        total_cells = 0
-        if double_check:
-            for brick in self.volume.bricks:
-                for data in brick.my_data:
-                    if na.any(na.isnan(data)):
-                        raise RuntimeError
-        view_pos = self.front_center + self.unit_vectors[2] * 1.0e6 * self.width[2]
-        for brick in self.volume.traverse(view_pos, self.front_center, image):
-            sampler(brick, num_threads = num_threads)
-            total_cells += na.prod(brick.my_data[0].shape)
-            pbar.update(total_cells)
-        pbar.finish()
-        image = sampler.aimage
+        image = self._render(double_check, num_threads, image, na, sampler)
 
-        if self.comm.rank is 0 and fn is not None:
-            if clip_ratio is not None:
-                write_bitmap(image, fn, clip_ratio*image.std())
-            else:
-                write_bitmap(image, fn)
+        self.save_image(fn, clip_ratio, image)
 
         return image
 
@@ -847,8 +848,11 @@
     def __init__(self, center, radius, fov, resolution,
                  transfer_function = None, fields = None,
                  sub_samples = 5, log_fields = None, volume = None,
-                 pf = None, no_ghost=False, rotation = None):
+                 pf = None, no_ghost=False, rotation = None, use_light=False):
         ParallelAnalysisInterface.__init__(self)
+        self.use_light = use_light
+        self.light_dir = None
+        self.light_rgba = None
         if rotation is None: rotation = na.eye(3)
         self.rotation_matrix = rotation
         if pf is not None: self.pf = pf
@@ -870,11 +874,11 @@
                                log_fields=log_fields)
         self.volume = volume
 
-    def snapshot(self):
+    def new_image(self):
         image = na.zeros((self.resolution**2,1,3), dtype='float64', order='C')
-        # We now follow figures 4-7 of:
-        # http://paulbourke.net/miscellaneous/domefisheye/fisheye/
-        # ...but all in Cython.
+        return image
+        
+    def get_sampler_args(self, image):
         vp = arr_fisheye_vectors(self.resolution, self.fov)
         vp.shape = (self.resolution**2,1,3)
         vp2 = vp.copy()
@@ -884,21 +888,31 @@
         vp *= self.radius
         uv = na.ones(3, dtype='float64')
         positions = na.ones((self.resolution**2, 1, 3), dtype='float64') * self.center
-        vector_plane = VectorPlane(positions, vp, self.center,
-                        (0.0, 1.0, 0.0, 1.0), image, uv, uv)
-        tfp = TransferFunctionProxy(self.transfer_function)
-        tfp.ns = self.sub_samples
-        self.volume.initialize_source()
-        mylog.info("Rendering fisheye of %s^2", self.resolution)
-        pbar = get_pbar("Ray casting",
-                        (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
 
+        args = (positions, vp, self.center,
+                (0.0, 1.0, 0.0, 1.0),
+                image, uv, uv,
+                na.zeros(3, dtype='float64'),
+                self.transfer_function, self.sub_samples)
+        return args
+
+    def _render(self, double_check, num_threads, image, na, sampler):
+        pbar = get_pbar("Ray casting", (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
         total_cells = 0
-        for brick in self.volume.traverse(None, self.center, image):
-            brick.cast_plane(tfp, vector_plane)
+        if double_check:
+            for brick in self.volume.bricks:
+                for data in brick.my_data:
+                    if na.any(na.isnan(data)):
+                        raise RuntimeError
+        
+        view_pos = self.center
+        for brick in self.volume.traverse(view_pos, None, image):
+            sampler(brick, num_threads=num_threads)
             total_cells += na.prod(brick.my_data[0].shape)
             pbar.update(total_cells)
+        
         pbar.finish()
+        image = sampler.aimage
         image.shape = (self.resolution, self.resolution, 3)
         return image
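
The refactor in the camera.py diff above splits the old monolithic snapshot() into overridable pieces. Below is a schematic sketch, not yt code: the method names mirror the diff, but the bodies are stubs, and only the snapshot() flow is filled in.

import numpy as na

class CameraSketch(object):
    """Schematic of the refactored snapshot() pipeline; the bodies are stubs."""

    def new_image(self):
        # Allocate the output buffer; subclasses pick the shape.
        raise NotImplementedError

    def get_sampler_args(self, image):
        # Package positions, vectors, bounds, and the transfer function.
        raise NotImplementedError

    def get_sampler(self, args):
        # Pick VolumeRenderSampler vs. LightSourceRenderSampler, etc.
        raise NotImplementedError

    def _render(self, double_check, num_threads, image, na, sampler):
        # Traverse bricks, call the sampler on each, return sampler.aimage.
        raise NotImplementedError

    def save_image(self, fn, clip_ratio, image):
        # Write the image out, only on rank 0 and only if fn is given.
        raise NotImplementedError

    def snapshot(self, fn=None, clip_ratio=None, double_check=False, num_threads=0):
        # The fixed flow; subclasses customize the steps, not the ordering.
        image = self.new_image()
        args = self.get_sampler_args(image)
        sampler = self.get_sampler(args)
        self.volume.initialize_source()
        image = self._render(double_check, num_threads, image, na, sampler)
        self.save_image(fn, clip_ratio, image)
        return image
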
 



https://bitbucket.org/yt_analysis/yt/changeset/398221a45ae7/
changeset:   398221a45ae7
branch:      yt
user:        samskillman
date:        2012-06-05 01:28:30
summary:     PerspectiveCamera is working
affected #:  1 file

diff -r b96f847095825c21a2f7a529527d3c6de9cfeb27 -r 398221a45ae7d2b9f5036a69393a45a284e8378e yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -342,6 +342,9 @@
             sampler = VolumeRenderSampler(*args)
         return sampler
 
+    def finalize_image(self, image):
+        pass
+
     def _render(self, double_check, num_threads, image, na, sampler):
         pbar = get_pbar("Ray casting", (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
         total_cells = 0
@@ -359,6 +362,7 @@
         
         pbar.finish()
         image = sampler.aimage
+        self.finalize_image(image)
         return image
 
     def save_image(self, fn, clip_ratio, image):
@@ -649,19 +653,20 @@
 data_object_registry["interactive_camera"] = InteractiveCamera
 
 class PerspectiveCamera(Camera):
-    def get_vector_plane(self, image):
+    
+    def get_sampler_args(self, image):
         # We should move away from pre-generation of vectors like this and into
         # the usage of on-the-fly generation in the VolumeIntegrator module
         # We might have a different width and back_center
         dl = (self.back_center - self.front_center)
-        self.front_center += dl
+        self.front_center += self.expand_factor*dl
         self.back_center -= dl
-        px = self.expand_factor*na.linspace(-self.width[0]/2.0, self.width[0]/2.0,
+        
+        px = na.linspace(-self.width[0]/2.0, self.width[0]/2.0,
                          self.resolution[0])[:,None]
-        py = self.expand_factor*na.linspace(-self.width[1]/2.0, self.width[1]/2.0,
+        py = na.linspace(-self.width[1]/2.0, self.width[1]/2.0,
                          self.resolution[1])[None,:]
         inv_mat = self.inv_mat
-        bc = self.back_center
         positions = na.zeros((self.resolution[0], self.resolution[1], 3),
                           dtype='float64', order='C')
         positions[:,:,0] = inv_mat[0,0]*px+inv_mat[0,1]*py+self.back_center[0]
@@ -671,14 +676,22 @@
         
         # We are likely adding on an odd cutting condition here
         vectors = self.front_center - positions
-        positions = self.front_center - 2.0*(((self.back_center-self.front_center)**2).sum())**0.5*vectors
+        positions = self.front_center - 1.0*(((self.back_center-self.front_center)**2).sum())**0.5*vectors
         vectors = (self.front_center - positions)
+        
+        uv = na.ones(3, dtype='float64')
+        image.shape = (self.resolution[0]**2,1,3)
+        vectors.shape = (self.resolution[0]**2,1,3)
+        positions.shape = (self.resolution[0]**2,1,3)
+        args = (positions, vectors, self.back_center, 
+                (0.0,1.0,0.0,1.0),
+                image, uv, uv,
+                na.zeros(3, dtype='float64'), 
+                self.transfer_function, self.sub_samples)
+        return args
 
-        vector_plane = VectorPlane(positions, vectors,
-                                      self.back_center, bounds, image,
-                                      self.unit_vectors[0],
-                                      self.unit_vectors[1])
-        return vector_plane
+    def finalize_image(self, image):
+        image.shape = self.resolution[0], self.resolution[0], 3
 
 def corners(left_edge, right_edge):
     return na.array([
@@ -896,6 +909,10 @@
                 self.transfer_function, self.sub_samples)
         return args
 
+
+    def finalize_image(self, image):
+        image.shape = self.resolution, self.resolution, 3
+
     def _render(self, double_check, num_threads, image, na, sampler):
         pbar = get_pbar("Ray casting", (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
         total_cells = 0
@@ -913,7 +930,9 @@
         
         pbar.finish()
         image = sampler.aimage
-        image.shape = (self.resolution, self.resolution, 3)
+
+        self.finalize_image(image)
+
         return image
 
 class MosaicFisheyeCamera(Camera):
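
A small sketch of the flatten/unflatten step the PerspectiveCamera changes above rely on: the rays and the image buffer are handed to the sampler as flat (N*N, 1, 3) bundles, and finalize_image restores the two-dimensional layout afterwards. Plain NumPy, with a made-up resolution.

import numpy as na

resolution = 256
# While sampling, the perspective rays and the image are flat bundles ...
image = na.zeros((resolution ** 2, 1, 3), dtype="float64")

# ... and finalize_image simply restores the two-dimensional layout.
image.shape = (resolution, resolution, 3)
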



https://bitbucket.org/yt_analysis/yt/changeset/4c883fd551d7/
changeset:   4c883fd551d7
branch:      yt
user:        samskillman
date:        2012-06-05 02:02:55
summary:     HEALpixCamera runs, but I have never had luck making images.  Will need another pair of eyes.
affected #:  2 files

diff -r 398221a45ae7d2b9f5036a69393a45a284e8378e -r 4c883fd551d7d0c1bb50d0bd6b7a3ba9c4b0123c yt/visualization/volume_rendering/api.py
--- a/yt/visualization/volume_rendering/api.py
+++ b/yt/visualization/volume_rendering/api.py
@@ -38,4 +38,5 @@
 from image_handling import export_rgba, import_rgba, \
                            plot_channel, plot_rgb
 from camera import Camera, PerspectiveCamera, StereoPairCamera, \
-    off_axis_projection, FisheyeCamera, MosaicFisheyeCamera
+    off_axis_projection, FisheyeCamera, MosaicFisheyeCamera, \
+    HEALpixCamera


diff -r 398221a45ae7d2b9f5036a69393a45a284e8378e -r 4c883fd551d7d0c1bb50d0bd6b7a3ba9c4b0123c yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -709,7 +709,7 @@
     def __init__(self, center, radius, nside,
                  transfer_function = None, fields = None,
                  sub_samples = 5, log_fields = None, volume = None,
-                 pf = None, use_kd=True, no_ghost=False):
+                 pf = None, use_kd=True, no_ghost=False, use_light=False):
         ParallelAnalysisInterface.__init__(self)
         if pf is not None: self.pf = pf
         self.center = na.array(center, dtype='float64')
@@ -723,36 +723,91 @@
         self.fields = fields
         self.sub_samples = sub_samples
         self.log_fields = log_fields
+        self.use_light = use_light
+        self.light_dir = None
+        self.light_rgba = None
         if volume is None:
             volume = AMRKDTree(self.pf, fields=self.fields, no_ghost=no_ghost,
                                log_fields=log_fields)
         self.use_kd = isinstance(volume, AMRKDTree)
         self.volume = volume
 
-    def snapshot(self, fn = None, clim = None):
-        nv = 12*self.nside**2
-        image = na.zeros((nv,1,3), dtype='float64', order='C')
+    def new_image(self):
+        image = na.zeros((12 * self.nside ** 2, 1, 3), dtype='float64', order='C')
+        return image
+
+    def get_sampler_args(self, image):
+        nv = 12 * self.nside ** 2
         vs = arr_pix2vec_nest(self.nside, na.arange(nv))
         vs *= self.radius
-        vs.shape = (nv,1,3)
+        vs.shape = nv, 1, 3
         uv = na.ones(3, dtype='float64')
         positions = na.ones((nv, 1, 3), dtype='float64') * self.center
-        vector_plane = VectorPlane(positions, vs, self.center,
-                        (0.0, 1.0, 0.0, 1.0), image, uv, uv)
-        tfp = TransferFunctionProxy(self.transfer_function)
-        tfp.ns = self.sub_samples
-        self.volume.initialize_source()
-        mylog.info("Rendering equivalent of %0.2f^2 image", nv**0.5)
-        pbar = get_pbar("Ray casting",
-                        (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
+        args = (positions, vs, self.center,
+                (0.0, 1.0, 0.0, 1.0),
+                image, uv, uv,
+                na.zeros(3, dtype='float64'),
+                self.transfer_function, self.sub_samples)
+        return args
+ 
 
+    def _render(self, double_check, num_threads, image, na, sampler):
+        pbar = get_pbar("Ray casting", (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
         total_cells = 0
-        for brick in self.volume.traverse(None, self.center, image):
-            brick.cast_plane(tfp, vector_plane)
+        if double_check:
+            for brick in self.volume.bricks:
+                for data in brick.my_data:
+                    if na.any(na.isnan(data)):
+                        raise RuntimeError
+        
+        view_pos = self.center
+        for brick in self.volume.traverse(view_pos, None, image):
+            sampler(brick, num_threads=num_threads)
             total_cells += na.prod(brick.my_data[0].shape)
             pbar.update(total_cells)
+        
         pbar.finish()
+        image = sampler.aimage
 
+        self.finalize_image(image)
+
+        return image
+
+    def snapshot(self, fn = None, clip_ratio = None, double_check = False,
+                 num_threads = 0, clim = None):
+        r"""Ray-cast the camera.
+
+        This method instructs the camera to take a snapshot -- i.e., call the ray
+        caster -- based on its current settings.
+
+        Parameters
+        ----------
+        fn : string, optional
+            If supplied, the image will be saved out to this before being
+            returned.  Scaling will be to the maximum value.
+        clip_ratio : float, optional
+            If supplied, the 'max_val' argument to write_bitmap will be handed
+            clip_ratio * image.std()
+
+        Returns
+        -------
+        image : array
+            An (N,M,3) array of the final returned values, in float64 form.
+        """
+        image = self.new_image()
+
+        args = self.get_sampler_args(image)
+
+        sampler = self.get_sampler(args)
+
+        self.volume.initialize_source()
+
+        image = self._render(double_check, num_threads, image, na, sampler)
+
+        self.save_image(fn, clim, image)
+
+        return image
+    def save_image(self, fn, clim, image):
         if self.comm.rank is 0 and fn is not None:
             # This assumes Density; this is a relatively safe assumption.
             import matplotlib.figure
@@ -772,7 +827,6 @@
             ax.yaxis.set_ticks(())
             canvas = matplotlib.backends.backend_agg.FigureCanvasAgg(fig)
             canvas.print_figure(fn)
-        return image
 
 
 class AdaptiveHEALpixCamera(Camera):
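
A hedged, self-contained sketch of the ray bundle the HEALpixCamera sampler args describe above. Random unit vectors stand in for arr_pix2vec_nest (yt's Cython helper), purely so the snippet runs on its own; the nside, radius, and center values are made up.

import numpy as na

nside = 4
radius = 1.0
center = na.array([0.5, 0.5, 0.5])

nv = 12 * nside ** 2                  # number of HEALPix pixels at this nside
# Stand-in for arr_pix2vec_nest(nside, na.arange(nv)): random unit vectors.
vs = na.random.normal(size=(nv, 3))
vs /= na.sqrt((vs ** 2).sum(axis=1))[:, None]
vs = (vs * radius).reshape(nv, 1, 3)  # one ray direction per pixel

positions = na.ones((nv, 1, 3), dtype="float64") * center   # all rays start at center
image = na.zeros((nv, 1, 3), dtype="float64")               # one RGB sample per pixel
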



https://bitbucket.org/yt_analysis/yt/changeset/324dab3abc9d/
changeset:   324dab3abc9d
branch:      yt
user:        samskillman
date:        2012-06-05 02:25:41
summary:     StereoPair now works
affected #:  1 file

diff -r 4c883fd551d7d0c1bb50d0bd6b7a3ba9c4b0123c -r 324dab3abc9d4edf41f18a88db7abbd025ad237c yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -902,13 +902,13 @@
         left_normal = fc + uv[1] * 0.5*self.relative_separation * wx - c
         right_normal = fc - uv[1] * 0.5*self.relative_separation * wx - c
         left_camera = Camera(c, left_normal, oc.width,
-                             oc.resolution, oc.transfer_function, uv[0],
-                             oc.volume, oc.fields, oc.log_fields,
-                             oc.sub_samples, oc.pf)
+                             oc.resolution, oc.transfer_function, north_vector=uv[0],
+                             volume=oc.volume, fields=oc.fields, log_fields=oc.log_fields,
+                             sub_samples=oc.sub_samples, pf=oc.pf)
         right_camera = Camera(c, right_normal, oc.width,
-                             oc.resolution, oc.transfer_function, uv[0],
-                             oc.volume, oc.fields, oc.log_fields,
-                             oc.sub_samples, oc.pf)
+                             oc.resolution, oc.transfer_function, north_vector=uv[0],
+                             volume=oc.volume, fields=oc.fields, log_fields=oc.log_fields,
+                             sub_samples=oc.sub_samples, pf=oc.pf)
         return (left_camera, right_camera)
 
 class FisheyeCamera(Camera):



https://bitbucket.org/yt_analysis/yt/changeset/f53085c08f88/
changeset:   f53085c08f88
branch:      yt
user:        samskillman
date:        2012-06-05 02:33:52
summary:     Adding InteractiveCamera to vr/api.py, and it works.
affected #:  1 file

diff -r 324dab3abc9d4edf41f18a88db7abbd025ad237c -r f53085c08f883254541f1ab52eed1eaabd179ee8 yt/visualization/volume_rendering/api.py
--- a/yt/visualization/volume_rendering/api.py
+++ b/yt/visualization/volume_rendering/api.py
@@ -39,4 +39,4 @@
                            plot_channel, plot_rgb
 from camera import Camera, PerspectiveCamera, StereoPairCamera, \
     off_axis_projection, FisheyeCamera, MosaicFisheyeCamera, \
-    HEALpixCamera
+    HEALpixCamera, InteractiveCamera



https://bitbucket.org/yt_analysis/yt/changeset/2878e15dc70e/
changeset:   2878e15dc70e
branch:      yt
user:        samskillman
date:        2012-06-05 03:02:26
summary:     ProjectionCamera works
affected #:  2 files

diff -r f53085c08f883254541f1ab52eed1eaabd179ee8 -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 yt/visualization/volume_rendering/api.py
--- a/yt/visualization/volume_rendering/api.py
+++ b/yt/visualization/volume_rendering/api.py
@@ -39,4 +39,4 @@
                            plot_channel, plot_rgb
 from camera import Camera, PerspectiveCamera, StereoPairCamera, \
     off_axis_projection, FisheyeCamera, MosaicFisheyeCamera, \
-    HEALpixCamera, InteractiveCamera
+    HEALpixCamera, InteractiveCamera, ProjectionCamera


diff -r f53085c08f883254541f1ab52eed1eaabd179ee8 -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -34,7 +34,7 @@
 #from yt.utilities.amr_utils import \
 #    arr_vec2pix_nest, arr_pix2vec_nest, AdaptiveRaySource, \
 #    arr_ang2pix_nest
-from yt.visualization.image_writer import write_bitmap
+from yt.visualization.image_writer import write_bitmap, write_image
 from yt.data_objects.data_containers import data_object_registry
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface, ProcessorPool
@@ -372,6 +372,10 @@
             else:
                 write_bitmap(image, fn)
 
+
+    def initialize_source(self):
+        return self.volume.initialize_source()
+
     def snapshot(self, fn = None, clip_ratio = None, double_check = False,
                  num_threads = 0):
         r"""Ray-cast the camera.
@@ -399,7 +403,7 @@
 
         sampler = self.get_sampler(args)
 
-        self.volume.initialize_source()
+        self.initialize_source()
 
         image = self._render(double_check, num_threads, image, na, sampler)
 
@@ -1619,9 +1623,9 @@
     return img, count
 
 class ProjectionCamera(Camera):
-    def __init__(self, pf, center, normal_vector, width, resolution,
+    def __init__(self, center, normal_vector, width, resolution,
             field, weight=None, volume=None, le=None, re=None,
-            north_vector=None):
+            north_vector=None, pf=None):
         Camera.__init__(self, center, normal_vector, width, resolution, None,
                 fields = field, pf=pf, volume=1,
                 le=le, re=re, north_vector=north_vector)
@@ -1629,24 +1633,16 @@
         self.weight = weight
         self.resolution = resolution
 
-    def snapshot(self):
-        fields = [self.field]
-        resolution = self.resolution
+    def get_sampler(self, args):
+        sampler = ProjectionSampler(*args)
+        return sampler
+
+    def initialize_source(self):
+        pass
+
+
+    def get_sampler_args(self, image):
         width = self.width[2]
-        pf = self.pf
-        if self.weight is not None:
-            # This is a temporary field, which we will remove at the end.
-            def _make_wf(f, w):
-                def temp_weightfield(a, b):
-                    tr = b[f].astype("float64") * b[w]
-                    return tr
-                return temp_weightfield
-            pf.field_info.add_field("temp_weightfield",
-                function=_make_wf(self.field, self.weight))
-            fields = ["temp_weightfield", self.weight]
-        image = na.zeros((resolution, resolution, 3), dtype='float64',
-                          order='C')
-
         north_vector = self.unit_vectors[0]
         east_vector = self.unit_vectors[1]
         normal_vector = self.unit_vectors[2]
@@ -1655,14 +1651,33 @@
         rotp = na.concatenate([na.linalg.pinv(self.unit_vectors).ravel('F'),
                                back_center])
 
-        sampler = ProjectionSampler(
-            rotp, normal_vector * width, back_center,
+        args = (rotp, normal_vector * width, back_center,
             (-width/2, width/2, -width/2, width/2),
             image, north_vector, east_vector,
             na.array([width, width, width], dtype='float64'))
-        
+        return args
+
+    def finalize_image(self,image):
+        pf = self.pf
+        if self.weight is None:
+            dl = self.width[2] * pf.units[pf.field_info[self.field].projection_conversion]
+            image *= dl
+        else:
+            image[:,:,0] /= image[:,:,1]
+            pf.field_info.pop("temp_weightfield")
+        return image[:,:,0]
+
+
+    def _render(self, double_check, num_threads, image, na, sampler):
         # Calculate the eight corners of the box
         # Back corners ...
+        pf = self.pf
+        width = self.width[2]
+        north_vector = self.unit_vectors[0]
+        east_vector = self.unit_vectors[1]
+        normal_vector = self.unit_vectors[2]
+        fields = self.fields
+
         mi = pf.domain_right_edge.copy()
         ma = pf.domain_left_edge.copy()
         for off1 in [-1, 1]:
@@ -1687,14 +1702,44 @@
             sampler(pg, num_threads = num_threads)
             pb.update(i)
         pb.finish()
+        
         image = sampler.aimage
-        if self.weight is None:
-            dl = width * pf.units[pf.field_info[self.field].projection_conversion]
-            image *= dl
+        self.finalize_image(image)
+        return image
+
+    def save_image(self, fn, clip_ratio, image):
+        print 'I am here!'
+        print fn 
+        if self.pf.field_info[self.field].take_log:
+            im = na.log10(image)
         else:
-            image[:,:,0] /= image[:,:,1]
-            pf.field_info.pop("temp_weightfield")
-        return image[:,:,0]
+            im = image
+        if self.comm.rank is 0 and fn is not None:
+            if clip_ratio is not None:
+                write_image(im, fn)
+            else:
+                write_image(im, fn)
+
+    def snapshot(self, fn = None, clip_ratio = None, double_check = False,
+                 num_threads = 0):
+
+        fields = [self.field]
+        resolution = self.resolution
+        pf = self.pf
+        if self.weight is not None:
+            # This is a temporary field, which we will remove at the end.
+            def _make_wf(f, w):
+                def temp_weightfield(a, b):
+                    tr = b[f].astype("float64") * b[w]
+                    return tr
+                return temp_weightfield
+            pf.field_info.add_field("temp_weightfield",
+                function=_make_wf(self.field, self.weight))
+            fields = ["temp_weightfield", self.weight]
+        self.fields = fields
+        return Camera.snapshot(self, fn = fn, clip_ratio = clip_ratio, double_check = double_check,
+                 num_threads = num_threads)
+
 
 data_object_registry["projection_camera"] = ProjectionCamera
 



https://bitbucket.org/yt_analysis/yt/changeset/88016491ea75/
changeset:   88016491ea75
branch:      yt
user:        samskillman
date:        2012-06-05 16:56:26
summary:     Merging volume_refactor bookmark into yt branch, which involved some manual merging
affected #:  78 files

diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,3 +1,3 @@
 include distribute_setup.py
-recursive-include yt/gui/reason/html/ *.html *.png *.ico *.js
-recursive-include yt/ *.pyx *.pxd *.hh *.h README* 
+recursive-include yt/gui/reason/html *.html *.png *.ico *.js
+recursive-include yt *.pyx *.pxd *.hh *.h README* 


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -42,7 +42,7 @@
 INST_SQLITE3=1  # Install a local version of SQLite3?
 INST_PYX=0      # Install PyX?  Sometimes PyX can be problematic without a
                 # working TeX installation.
-INST_0MQ=1      # Install 0mq (for IPython) and affiliated bindings?
+INST_0MQ=0      # Install 0mq (for IPython) and affiliated bindings?
 
 # If you've got YT some other place, set this to point to it.
 YT_DIR=""
@@ -353,7 +353,7 @@
 
 # Now we dump all our SHA512 files out.
 
-echo '8da1b0af98203254a1cf776d73d09433f15b5090871f9fd6d712cea32bcd44446b7323ae1069b28907d2728e77944a642825c61bc3b54ceb46c91897cc4f6051  Cython-0.15.1.tar.gz' > Cython-0.15.1.tar.gz.sha512
+echo '2c1933ab31246b4f4eba049d3288156e0a72f1730604e3ed7357849967cdd329e4647cf236c9442ecfb06d0aff03e6fc892a7ba2a5c1cf5c011b7ab9c619acec  Cython-0.16.tar.gz' > Cython-0.16.tar.gz.sha512
 echo 'b8a12bf05b3aafa71135e47da81440fd0f16a4bd91954bc5615ad3d3b7f9df7d5a7d5620dc61088dc6b04952c5c66ebda947a4cfa33ed1be614c8ca8c0f11dff  PhiloGL-1.4.2.zip' > PhiloGL-1.4.2.zip.sha512
 echo '44eea803870a66ff0bab08d13a8b3388b5578ebc1c807d1d9dca0a93e6371e91b15d02917a00b3b20dc67abb5a21dabaf9b6e9257a561f85eeff2147ac73b478  PyX-0.11.1.tar.gz' > PyX-0.11.1.tar.gz.sha512
 echo '1a754d560bfa433f0960ab3b5a62edb5f291be98ec48cf4e5941fa5b84139e200b87a52efbbd6fa4a76d6feeff12439eed3e7a84db4421940d1bbb576f7a684e  Python-2.7.2.tgz' > Python-2.7.2.tgz.sha512
@@ -366,7 +366,7 @@
 echo 'ffc5c9e0c8c8ea66479abd467e442419bd1c867e6dbd180be6a032869467955dc570cfdf1388452871303a440738f302d3227ab7728878c4a114cfc45d29d23c  ipython-0.12.tar.gz' > ipython-0.12.tar.gz.sha512
 echo 'e748b66a379ee1e7963b045c3737670acf6aeeff1ebed679f427e74b642faa77404c2d5bbddb922339f009c229d0af1ae77cc43eab290e50af6157a6406d833f  libpng-1.2.43.tar.gz' > libpng-1.2.43.tar.gz.sha512
 echo 'f5ab95c29ef6958096970265a6079f0eb8c43a500924346c4a6c6eb89d9110eeeb6c34a53715e71240e82ded2b76a7b8d5a9b05a07baa000b2926718264ad8ff  matplotlib-1.1.0.tar.gz' > matplotlib-1.1.0.tar.gz.sha512
-echo '78715bb2bd7ed3291089948530a59d5eff146a64179eae87904a2c328716f26749abb0c5417d6001cadfeebabb4e24985d5a59ceaae4d98c4762163970f83975  mercurial-2.0.tar.gz' > mercurial-2.0.tar.gz.sha512
+echo '702f67c48e4dbe191dbe5ca0df6b5a84fa4f5c424cf1fae60b5053dfe6532531330738c7aa3012d900d49efdd743cd1ebc238bb15f354f67228e2a2c95b98a89  mercurial-2.2.tar.gz' > mercurial-2.2.tar.gz.sha512
 echo 'de3dd37f753614055dcfed910e9886e03688b8078492df3da94b1ec37be796030be93291cba09e8212fffd3e0a63b086902c3c25a996cf1439e15c5b16e014d9  numpy-1.6.1.tar.gz' > numpy-1.6.1.tar.gz.sha512
 echo '5ad681f99e75849a5ca6f439c7a19bb51abc73d121b50f4f8e4c0da42891950f30407f761a53f0fe51b370b1dbd4c4f5a480557cb2444c8c7c7d5412b328a474  sqlite-autoconf-3070500.tar.gz' > sqlite-autoconf-3070500.tar.gz.sha512
 echo 'edae735960279d92acf58e1f4095c6392a7c2059b8f1d2c46648fc608a0fb06b392db2d073f4973f5762c034ea66596e769b95b3d26ad963a086b9b2d09825f2  zlib-1.2.3.tar.bz2' > zlib-1.2.3.tar.bz2.sha512
@@ -388,10 +388,10 @@
 get_enzotools Python-2.7.2.tgz
 get_enzotools numpy-1.6.1.tar.gz
 get_enzotools matplotlib-1.1.0.tar.gz
-get_enzotools mercurial-2.0.tar.gz
+get_enzotools mercurial-2.2.tar.gz
 get_enzotools ipython-0.12.tar.gz
 get_enzotools h5py-2.0.1.tar.gz
-get_enzotools Cython-0.15.1.tar.gz
+get_enzotools Cython-0.16.tar.gz
 get_enzotools ext-3.3.2.zip
 get_enzotools ext-slate-110328.zip
 get_enzotools PhiloGL-1.4.2.zip
@@ -531,7 +531,7 @@
 if [ $INST_HG -eq 1 ]
 then
     echo "Installing Mercurial."
-    do_setup_py mercurial-2.0
+    do_setup_py mercurial-2.2
     export HG_EXEC=${DEST_DIR}/bin/hg
 else
     # We assume that hg can be found in the path.
@@ -631,7 +631,7 @@
 
 do_setup_py ipython-0.12
 do_setup_py h5py-2.0.1
-do_setup_py Cython-0.15.1
+do_setup_py Cython-0.16
 [ $INST_PYX -eq 1 ] && do_setup_py PyX-0.11.1
 
 echo "Doing yt update, wiping local changes and updating to branch ${BRANCH}"


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a scripts/iyt
--- a/scripts/iyt
+++ b/scripts/iyt
@@ -1,5 +1,6 @@
 #!python
 import os, re
+from distutils import version
 from yt.mods import *
 from yt.data_objects.data_containers import AMRData
 namespace = locals().copy()
@@ -22,6 +23,11 @@
     code.interact(doc, None, namespace)
     sys.exit()
 
+if version.LooseVersion(IPython.__version__) <= version.LooseVersion('0.10'):
+    api_version = '0.10'
+else:
+    api_version = '0.11'
+
 if IPython.__version__.startswith("0.10"):
     api_version = '0.10'
 elif IPython.__version__.startswith("0.11") or \
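
A small, hedged example of why the LooseVersion check added to iyt above is preferable to string handling (the installed version string below is made up): LooseVersion orders release numbers numerically, so "0.9.1" correctly sorts below "0.10", which a plain lexicographic comparison would get backwards.

from distutils import version

installed = "0.9.1"    # stand-in for IPython.__version__

if version.LooseVersion(installed) <= version.LooseVersion("0.10"):
    api_version = "0.10"
else:
    api_version = "0.11"
print(api_version)     # -> "0.10"
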


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -438,6 +438,91 @@
         (4. / 3. * math.pi * rho_crit * \
         (self.radial_bins * cm) ** 3.0)
 
+class RockstarHalo(Halo):
+    def __init__(self,halo_list,index,ID, DescID, Mvir, Vmax, Vrms, Rvir, Rs, Np, 
+                  X, Y, Z, VX, VY, VZ, JX, JY, JZ, Spin):
+        """Implement the properties reported by Rockstar: ID, Descendant ID,
+           Mvir, Vmax, Vrms, Rvir, Rs, Np, XYZ, VXYZ, JXYZ, and spin.
+           Most defaults are removed since we don't read in which halos
+           particles belong to. 
+        """
+        #we can still use get_sphere!
+        self.ID = ID #from rockstar
+        self.id = index #index in the halo list
+        self.pf = halo_list.pf
+
+        self.DescID = DescID
+        self.Mvir = Mvir
+        self.Vmax = Vmax
+        self.Vrms = Vrms
+        self.Rvir = Rvir
+        self.Rs   = Rs
+        self.Np   = Np
+        self.X    = X
+        self.Y    = Y
+        self.Z    = Z
+        self.VX   = VX
+        self.VY   = VY
+        self.VZ   = VZ
+        self.JX   = JX
+        self.JY   = JY
+        self.JZ   = JZ
+        self.Spin = Spin
+
+        #Halo.__init__(self,halo_list,index,
+        self.size=Np 
+        self.CoM=na.array([X,Y,Z])
+        self.max_dens_point=-1
+        self.group_total_mass=-1
+        self.max_radius=Rvir
+        self.bulk_vel=na.array([VX,VY,VZ])*1e5
+        self.rms_vel=-1
+        self.group_total_mass = -1 #not implemented 
+    
+    def maximum_density(self):
+        r"""Not implemented."""
+        return -1
+
+    def maximum_density_location(self):
+        r"""Not implemented."""
+        return self.center_of_mass()
+
+    def total_mass(self):
+        r"""Not implemented."""
+        return -1
+
+    def get_size(self):
+        r"""Return the number of particles belonging to the halo."""
+        return self.Np
+
+    def write_particle_list(self,handle):
+        r"""Not implemented."""
+        return -1
+
+    def virial_mass(self):
+        r"""Virial mass in Msun/h"""
+        return self.Mvir
+
+    def virial_radius(self):
+        r"""Virial radius in Mpc/h comoving"""
+        return self.Rvir
+
+    def virial_bin(self):
+        r"""Not implemented"""
+        return -1
+
+    def virial_density(self):
+        r"""Not implemented """
+        return -1
+
+    def virial_info(self):
+        r"""Not implemented"""
+        return -1 
+
+    def __getitem__(self,key):
+        r"""Not implemented"""
+        return None
+
 
 class HOPHalo(Halo):
     _name = "HOPHalo"
@@ -903,6 +988,97 @@
             f.flush()
         f.close()
 
+class RockstarHaloList(HaloList):
+    #because we don't yet know halo-particle affiliations
+    #most of the halo list methods are not implemented
+    #furthermore, Rockstar only accepts DM particles of
+    #a fixed mass, so we don't allow stars at all
+    #Still, we inherit from HaloList because in the future
+    #we might implement halo-particle affiliations
+    def __init__(self,pf,out_list):
+        mylog.info("Initializing Rockstar List")
+        self._data_source = None
+        self._groups = []
+        self._max_dens = -1
+        self.pf = pf
+        self.out_list = out_list
+        mylog.info("Parsing Rockstar halo list")
+        self._parse_output(out_list)
+        mylog.info("Finished %s"%out_list)
+
+    def _run_finder(self):
+        pass
+
+    def __obtain_particles(self):
+        pass
+
+    def _get_dm_indices(self):
+        pass
+
+    def _parse_output(self,out_list=None):
+        """
+        Read the out_*.list text file produced
+        by Rockstar into memory."""
+        
+        pf = self.pf
+
+        if out_list is None:
+            out_list = self.out_list
+
+        lines = open(out_list).readlines()
+        names = []
+        formats = []
+        
+        #find the variables names from the first defining line
+        names = lines[0].replace('#','').split(' ')
+        for j,line in enumerate(lines):
+            if not line.startswith('#'): break
+
+        #find out the table datatypes by evaluating the first data line
+        splits = filter(lambda x: len(x.strip()) > 0 ,line.split(' '))
+        for num in splits:
+            if 'nan' not in num:
+                formats += na.array(eval(num)).dtype,
+            else:
+                formats += na.dtype('float'),
+        assert len(formats) == len(names)
+
+        #Jc = 1.98892e33/pf['mpchcm']*1e5
+        Jc = 1.0
+        conv = dict(X=1.0/pf['mpchcm'],
+                    Y=1.0/pf['mpchcm'],
+                    Z=1.0/pf['mpchcm'], #to unitary
+                    VX=1e0,VY=1e0,VZ=1e0, #to km/s
+                    Mvir=1.0, #Msun/h
+                    Vmax=1e0,Vrms=1e0,
+                    Rvir=1.0/pf['kpchcm'],
+                    Rs=1.0/pf['kpchcm'],
+                    JX=Jc,JY=Jc,JZ=Jc)
+        dtype = {'names':names,'formats':formats}
+        halo_table = na.loadtxt(out_list,skiprows=j-1,dtype=dtype,comments='#')            
+        #convert position units  
+        for name in names:
+            halo_table[name]=halo_table[name]*conv.get(name,1)
+        
+        for k,row in enumerate(halo_table):
+            args = tuple([val for val in row])
+            halo = RockstarHalo(self,k,*args)
+            self._groups.append(halo)
+    
+
+    #len is ok
+    #iter is OK
+    #getitem is ok
+    #nn is ok I think
+    #nn2d is ok I think
+
+    def write_out(self):
+        pass
+    def write_particle_list(self):
+        pass
+    
+
+    
 
 class HOPHaloList(HaloList):
 
@@ -1245,7 +1421,7 @@
         while index < self.group_count:
             self._groups[index] = self._halo_class(self, index, \
                 size=self.group_sizes[index], CoM=self.CoM[index], \
-                max_dens_point=self.max_dens_point[i], \
+                max_dens_point=self.max_dens_point[index], \
                 group_total_mass=self.Tot_M[index],
                 max_radius=self.max_radius[index],
                 bulk_vel=self.bulk_vel[index], tasks=self.halo_taskmap[index],
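
A self-contained sketch of the loadtxt pattern RockstarHaloList._parse_output uses above to read an out_*.list file into a structured array. The column names and sample rows here are made up, and the real code infers the names and dtypes from the file header rather than hard-coding them.

import numpy as na
from io import StringIO

# A tiny stand-in for a Rockstar out_*.list file: a '#'-prefixed header line
# naming the columns, further comment lines, then whitespace-separated rows.
sample = StringIO(u"""#ID DescID Mvir Vmax X Y Z
#a = 1.0000
0 -1 1.2e12 180.5 10.1 22.3 5.6
1 -1 3.4e11 95.0 40.2 11.8 60.0
""")

names = ["ID", "DescID", "Mvir", "Vmax", "X", "Y", "Z"]
halo_table = na.loadtxt(sample, comments="#",
                        dtype={"names": names,
                               "formats": ["i8", "i8", "f8", "f8", "f8", "f8", "f8"]})
# Each row can now be unpacked into a halo object, as RockstarHalo does above.
print(halo_table["Mvir"])
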


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -25,9 +25,11 @@
 
 from yt.mods import *
 from os import environ
+from os import mkdir
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface, ProcessorPool, Communicator
 
+from yt.analysis_modules.halo_finding.halo_objects import * #Halos & HaloLists
 import rockstar_interface
 import socket
 import time
@@ -45,14 +47,28 @@
         return data_source
 
 class RockstarHaloFinder(ParallelAnalysisInterface):
-    def __init__(self, pf, num_readers = 0, num_writers = 0):
+    def __init__(self, pf, num_readers = 1, num_writers = None, 
+            outbase=None,particle_mass=-1.0,overwrite=False,
+            left_edge = None, right_edge = None):
         ParallelAnalysisInterface.__init__(self)
         # No subvolume support
         self.pf = pf
         self.hierarchy = pf.h
+        if num_writers is None:
+            num_writers = self.comm.size - num_readers -1
         self.num_readers = num_readers
         self.num_writers = num_writers
+        self.particle_mass = particle_mass 
+        self.overwrite = overwrite
+        if left_edge is None:
+            left_edge = pf.domain_left_edge
+        if right_edge is None:
+            right_edge = pf.domain_right_edge
+        self.le = left_edge
+        self.re = right_edge
         if self.num_readers + self.num_writers + 1 != self.comm.size:
+            print '%i reader + %i writers != %i mpi'%\
+                    (self.num_reader,self.num_writers,self.comm.size)
             raise RuntimeError
         self.center = (pf.domain_right_edge + pf.domain_left_edge)/2.0
         data_source = None
@@ -64,6 +80,9 @@
             for wg in self.pool.workgroups:
                 if self.comm.rank in wg.ranks: self.workgroup = wg
         data_source = self.pf.h.all_data()
+        if outbase is None:
+            outbase = str(self.pf)+'_rockstar'
+        self.outbase = outbase        
         self.handler = rockstar_interface.RockstarInterface(
                 self.pf, data_source)
 
@@ -80,16 +99,29 @@
             (server_address, port))
         self.port = str(self.port)
 
-    def run(self, block_ratio = 1):
+    def run(self, block_ratio = 1,**kwargs):
+        """
+        
+        """
         if block_ratio != 1:
             raise NotImplementedError
         self._get_hosts()
+        #because rockstar *always* write to exactly the same
+        #out_0.list filename we make a directory for it
+        #to sit inside so it doesn't get accidentally
+        #overwritten 
+        if self.workgroup.name == "server":
+            if not os.path.exists(self.outbase):
+                os.mkdir(self.outbase)
         self.handler.setup_rockstar(self.server_address, self.port,
                     parallel = self.comm.size > 1,
                     num_readers = self.num_readers,
                     num_writers = self.num_writers,
                     writing_port = -1,
-                    block_ratio = block_ratio)
+                    block_ratio = block_ratio,
+                    outbase = self.outbase,
+                    particle_mass = float(self.particle_mass),
+                    **kwargs)
         if self.comm.size == 1:
             self.handler.call_rockstar()
         else:
@@ -97,9 +129,17 @@
             if self.workgroup.name == "server":
                 self.handler.start_server()
             elif self.workgroup.name == "readers":
-                #time.sleep(0.5 + self.workgroup.comm.rank/10.0)
+                time.sleep(0.1 + self.workgroup.comm.rank/10.0)
                 self.handler.start_client()
             elif self.workgroup.name == "writers":
-                #time.sleep(1.0 + self.workgroup.comm.rank/10.0)
+                time.sleep(0.2 + self.workgroup.comm.rank/10.0)
                 self.handler.start_client()
         self.comm.barrier()
+        #quickly rename the out_0.list 
+    
+    def halo_list(self,file_name='out_0.list'):
+        """
+        Reads in the out_0.list file and generates RockstarHaloList
+        and RockstarHalo objects.
+        """
+        return RockstarHaloList(self.pf,self.outbase+'/%s'%file_name)


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -241,7 +241,7 @@
 cdef RockstarInterface rh
 cdef void rh_read_particles(char *filename, particle **p, np.int64_t *num_p):
     cdef int i, fi, npart, tnpart
-    cdef np.float64_t conv[6], left_edge[6]
+    cdef np.float64_t conv[6], left_edge[6], right_edge[3]
     dd = rh.data_source
     cdef np.ndarray[np.int64_t, ndim=1] arri
     cdef np.ndarray[np.float64_t, ndim=1] arr
@@ -257,9 +257,12 @@
     #print "Loading indices: size = ", tnpart
     conv[0] = conv[1] = conv[2] = rh.pf["mpchcm"]
     conv[3] = conv[4] = conv[5] = 1e-5
-    left_edge[0] = rh.pf.domain_left_edge[0]
-    left_edge[1] = rh.pf.domain_left_edge[1]
-    left_edge[2] = rh.pf.domain_left_edge[2]
+    left_edge[0] = rh.le[0]
+    left_edge[1] = rh.le[1]
+    left_edge[2] = rh.le[2]
+    right_edge[0] = rh.re[0]
+    right_edge[1] = rh.re[1]
+    right_edge[2] = rh.re[2]
     left_edge[3] = left_edge[4] = left_edge[5] = 0.0
     pi = 0
     for g in grids:
@@ -274,11 +277,15 @@
                       "particle_velocity_z"]:
             arr = dd._get_data_from_grid(g, field).astype("float64")
             for i in range(npart):
+                if fi<3: 
+                    if  left_edge[i] > arr[i]: continue
+                    if right_edge[i] < arr[i]: continue
                 p[0][i+pi].pos[fi] = (arr[i]-left_edge[fi])*conv[fi]
             fi += 1
         pi += npart
     num_p[0] = tnpart
-    print "TOTAL", block, pi, tnpart, len(grids)
+    print "Block #%i | Particles %i | Grids %i"%\
+            ( block, pi, len(grids))
 
 cdef class RockstarInterface:
 
@@ -296,12 +303,14 @@
                        np.float64_t particle_mass = -1.0,
                        int parallel = False, int num_readers = 1,
                        int num_writers = 1,
-                       int writing_port = -1, int block_ratio = 1):
+                       int writing_port = -1, int block_ratio = 1,
+                       int periodic = 1, int min_halo_size = 20,
+                       char *outbase = 'None'):
         global PARALLEL_IO, PARALLEL_IO_SERVER_ADDRESS, PARALLEL_IO_SERVER_PORT
         global FILENAME, FILE_FORMAT, NUM_SNAPS, STARTING_SNAP, h0, Ol, Om
         global BOX_SIZE, PERIODIC, PARTICLE_MASS, NUM_BLOCKS, NUM_READERS
         global FORK_READERS_FROM_WRITERS, PARALLEL_IO_WRITER_PORT, NUM_WRITERS
-        global rh
+        global rh, SCALE_NOW, OUTBASE, MIN_HALO_OUTPUT_SIZE
         if parallel:
             PARALLEL_IO = 1
             PARALLEL_IO_SERVER_ADDRESS = server_address
@@ -324,12 +333,18 @@
         h0 = self.pf.hubble_constant
         Ol = self.pf.omega_lambda
         Om = self.pf.omega_matter
+        SCALE_NOW = 1.0/(self.pf.current_redshift+1.0)
+        if not outbase =='None'.decode('UTF-8'):
+            #output directory. since we can't change the output filenames
+            #workaround is to make a new directory
+            print 'using %s as outbase'%outbase
+            OUTBASE = outbase 
 
         if particle_mass < 0:
             print "Assuming single-mass particle."
             particle_mass = self.pf.h.grids[0]["ParticleMassMsun"][0] / h0
         PARTICLE_MASS = particle_mass
-        PERIODIC = 1
+        PERIODIC = periodic
         BOX_SIZE = (self.pf.domain_right_edge[0] -
                     self.pf.domain_left_edge[0]) * self.pf['mpchcm']
         setup_config()


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/analysis_modules/halo_profiler/api.py
--- a/yt/analysis_modules/halo_profiler/api.py
+++ b/yt/analysis_modules/halo_profiler/api.py
@@ -34,5 +34,4 @@
 from .multi_halo_profiler import \
     HaloProfiler, \
     FakeProfile, \
-    get_halo_sphere, \
     standard_fields


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/analysis_modules/halo_profiler/centering_methods.py
--- a/yt/analysis_modules/halo_profiler/centering_methods.py
+++ b/yt/analysis_modules/halo_profiler/centering_methods.py
@@ -43,14 +43,14 @@
 @add_function("Min_Dark_Matter_Density")
 def find_minimum_dm_density(data):
     ma, maxi, mx, my, mz, mg = data.quantities['MinLocation']('Dark_Matter_Density',
-                                                              lazy_reader=False,
+                                                              lazy_reader=True,
                                                               preload=False)
     return (mx, my, mz)
 
 @add_function("Max_Dark_Matter_Density")
 def find_maximum_dm_density(data):
     ma, maxi, mx, my, mz, mg = data.quantities['MaxLocation']('Dark_Matter_Density',
-                                                              lazy_reader=False,
+                                                              lazy_reader=True,
                                                               preload=False)
     return (mx, my, mz)
 
@@ -58,7 +58,7 @@
 def find_CoM_dm_density(data):
    dc_x, dc_y, dc_z = data.quantities['CenterOfMass'](use_cells=False, 
                                                       use_particles=True,
-                                                      lazy_reader=False,
+                                                      lazy_reader=True,
                                                       preload=False)
    return (dc_x, dc_y, dc_z)
 
@@ -67,14 +67,14 @@
 @add_function("Min_Gas_Density")
 def find_minimum_gas_density(data):
     ma, maxi, mx, my, mz, mg = data.quantities['MinLocation']('Density',
-                                                              lazy_reader=False,
+                                                              lazy_reader=True,
                                                               preload=False)
     return (mx, my, mz)
 
 @add_function("Max_Gas_Density")
 def find_maximum_gas_density(data):
     ma, maxi, mx, my, mz, mg = data.quantities['MaxLocation']('Density',
-                                                              lazy_reader=False,
+                                                              lazy_reader=True,
                                                               preload=False)
     return (mx, my, mz)
 
@@ -82,7 +82,7 @@
 def find_CoM_gas_density(data):
    dc_x, dc_y, dc_z = data.quantities['CenterOfMass'](use_cells=True, 
                                                       use_particles=False,
-                                                      lazy_reader=False,
+                                                      lazy_reader=True,
                                                       preload=False)
    return (dc_x, dc_y, dc_z)
 
@@ -91,14 +91,14 @@
 @add_function("Min_Total_Density")
 def find_minimum_total_density(data):
     ma, maxi, mx, my, mz, mg = data.quantities['MinLocation']('Matter_Density',
-                                                              lazy_reader=False,
+                                                              lazy_reader=True,
                                                               preload=False)
     return (mx, my, mz)
 
 @add_function("Max_Total_Density")
 def find_maximum_total_density(data):
     ma, maxi, mx, my, mz, mg = data.quantities['MaxLocation']('Matter_Density',
-                                                              lazy_reader=False,
+                                                              lazy_reader=True,
                                                               preload=False)
     return (mx, my, mz)
 
@@ -106,7 +106,7 @@
 def find_CoM_total_density(data):
    dc_x, dc_y, dc_z = data.quantities['CenterOfMass'](use_cells=True, 
                                                       use_particles=True,
-                                                      lazy_reader=False,
+                                                      lazy_reader=True,
                                                       preload=False)
    return (dc_x, dc_y, dc_z)
 
@@ -115,14 +115,14 @@
 @add_function("Min_Temperature")
 def find_minimum_temperature(data):
     ma, mini, mx, my, mz, mg = data.quantities['MinLocation']('Temperature',
-                                                              lazy_reader=False,
+                                                              lazy_reader=True,
                                                               preload=False)
     return (mx, my, mz)
 
 @add_function("Max_Temperature")
 def find_maximum_temperature(data):
     ma, maxi, mx, my, mz, mg = data.quantities['MaxLocation']('Temperature',
-                                                              lazy_reader=False,
+                                                              lazy_reader=True,
                                                               preload=False)
     return (mx, my, mz)
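
All of the built-in centering functions above now pass lazy_reader=True, so the derived quantities stream grid-by-grid instead of loading the whole sphere at once. A user-defined centering callback can be registered the same way; this sketch simply mirrors the pattern above under a made-up name:

    @add_function("CoM_Gas_and_Particles")
    def find_combined_center_of_mass(data):
        # Center of mass from both cells and particles, read lazily.
        dc_x, dc_y, dc_z = data.quantities['CenterOfMass'](use_cells=True,
                                                           use_particles=True,
                                                           lazy_reader=True,
                                                           preload=False)
        return (dc_x, dc_y, dc_z)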
 


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -64,7 +64,7 @@
                                          dm_only=False, resize=True, 
                                          fancy_padding=True, rearrange=True),
                  halo_radius=None, radius_units='1', n_profile_bins=50,
-                 recenter = None,
+                 recenter=None,
                  profile_output_dir='radial_profiles', projection_output_dir='projections',
                  projection_width=8.0, projection_width_units='mpc', project_at_level='max',
                  velocity_center=['bulk', 'halo'], filter_quantities=['id', 'center', 'r_max'], 
@@ -111,8 +111,32 @@
             Args given with call to halo finder function.  Default: None.
         halo_finder_kwargs : dict
             kwargs given with call to halo finder function. Default: None.
-        recenter : {string, function
-            The name of a function that recenters the halo for analysis.
+        recenter : {string, function}
+            The exact location of the sphere center can significantly affect 
+            radial profiles.  The halo center loaded by the HaloProfiler will 
+            typically be the dark matter center of mass calculated by a halo 
+            finder.  However, this may not be the best location for centering 
+            profiles of baryon quantities.  For example, one may want to center 
+            on the maximum density.
+            If recenter is given as a string, one of the existing recentering 
+            functions will be used:
+                Min_Dark_Matter_Density : location of minimum dark matter density
+                Max_Dark_Matter_Density : location of maximum dark matter density
+                CoM_Dark_Matter_Density : dark matter center of mass
+                Min_Gas_Density : location of minimum gas density
+                Max_Gas_Density : location of maximum gas density
+                CoM_Gas_Density : gas center of mass
+                Min_Total_Density : location of minimum total density
+                Max_Total_Density : location of maximum total density
+                CoM_Total_Density : total center of mass
+                Min_Temperature : location of minimum temperature
+                Max_Temperature : location of maximum temperature
+            Alternately, a function can be supplied for custom recentering.
+            The function should take only one argument, a sphere object.
+                Example function:
+                    def my_center_of_mass(data):
+                       my_x, my_y, my_z = data.quantities['CenterOfMass']()
+                       return (my_x, my_y, my_z)
             Default: None.
         halo_radius : float
             If no halo radii are provided in the halo list file, this
@@ -148,8 +172,7 @@
                 * ["bulk", "sphere"]: the bulk velocity of the sphere
                   centered on the halo center.
     	        * ["max", field]: the velocity of the cell that is the
-    	          location of the maximum of the field 
-                  specified (used only when halos set to single).
+    	          location of the maximum of the field specified.
         filter_quantities : array_like
             Quantities from the original halo list file to be written out in the 
             filtered list file.  Default: ['id','center'].
@@ -161,8 +184,8 @@
         
         Examples
         --------
-        >>> import yt.analysis_modules.halo_profiler.api as HP
-        >>> hp = HP.halo_profiler("DD0242/DD0242")
+        >>> from yt.analysis_modules.halo_profiler.api import *
+        >>> hp = HaloProfiler("RedshiftOutput0005/RD0005")
         
         """
         ParallelAnalysisInterface.__init__(self)
@@ -226,13 +249,9 @@
         # Option to recenter sphere someplace else.
         self.recenter = recenter
 
-        # Look for any field that might need to have the bulk velocity set.
+        # Flag for whether calculating halo bulk velocity is necessary.
         self._need_bulk_velocity = False
-        for field in [hp['field'] for hp in self.profile_fields]:
-            if 'Velocity' in field or 'Mach' in field:
-                self._need_bulk_velocity = True
-                break
-
+        
         # Check validity for VelocityCenter parameter which toggles how the 
         # velocity is zeroed out for radial velocity profiles.
         self.velocity_center = velocity_center[:]
@@ -250,9 +269,7 @@
                 mylog.error("Second value of VelocityCenter must be either 'halo' or 'sphere' if first value is 'bulk'.")
                 return None
         elif self.velocity_center[0] == 'max':
-            if self.halos is 'multiple':
-                mylog.error("Getting velocity center from a max field value only works with halos='single'.")
-                return None
+            mylog.info('Using position of max %s for velocity center.' % self.velocity_center[1])
         else:
             mylog.error("First value of parameter, VelocityCenter, must be either 'bulk' or 'max'.")
             return None
@@ -284,7 +301,7 @@
                 mylog.error("No halos loaded, there will be nothing to do.")
                 return None
         else:
-            mylog.error("I don't know whether to get halos from hop or from density maximum.  This should not have happened.")
+            mylog.error("Keyword, halos, must be either 'single' or 'multiple'.")
             return None
 
     def add_halo_filter(self, function, *args, **kwargs):
@@ -351,6 +368,10 @@
             
         """
 
+        # Check for any field that might need to have the bulk velocity set.
+        if 'Velocity' in field or 'Mach' in field:
+            self._need_bulk_velocity = True
+
         self.profile_fields.append({'field':field, 'weight_field':weight_field, 
                                     'accumulation':accumulation})
 
@@ -379,11 +400,15 @@
 
         """
 
+        # Check for any field that might need to have the bulk velocity set.
+        if 'Velocity' in field or 'Mach' in field:
+            self._need_bulk_velocity = True
+
         self.projection_fields.append({'field':field, 'weight_field':weight_field, 
                                        'cmap': cmap})
 
     @parallel_blocking_call
-    def make_profiles(self, filename=None, prefilters=None, **kwargs):
+    def make_profiles(self, filename=None, prefilters=None, njobs=-1):
         r"""Make radial profiles for all halos in the list.
         
         After all the calls to `add_profile`, this will trigger the actual
@@ -394,7 +419,7 @@
         filename : string
             If set, a file will be written with all of the filtered halos
             and the quantities returned by the filter functions.
-            Default=None.
+            Default: None.
         prefilters : array_like
             A single dataset can contain thousands or tens of thousands of
             halos. Significant time can be saved by not profiling halos
@@ -402,6 +427,11 @@
             Simple filters based on quantities provided in the initial
             halo list can be used to filter out unwanted halos using this
             parameter.
+            Default: None.
+        njobs : int
+            The number of jobs over which to split the profiling.  Set
+            to -1 so that each halo is done by a single processor.
+            Default: -1.
         
         Examples
         --------
@@ -454,7 +484,7 @@
 
         # Profile all halos.
         updated_halos = []
-        for halo in parallel_objects(self.all_halos, -1):
+        for halo in parallel_objects(self.all_halos, njobs=njobs):
             # Apply prefilters to avoid profiling unwanted halos.
             filter_result = True
             haloQuantities = {}
@@ -468,7 +498,8 @@
 
                 profile_filename = "%s/Halo_%04d_profile.dat" % (my_output_dir, halo['id'])
 
-                profiledHalo = self._get_halo_profile(halo, profile_filename, virial_filter=virial_filter)
+                profiledHalo = self._get_halo_profile(halo, profile_filename,
+                                                      virial_filter=virial_filter)
 
                 if profiledHalo is None:
                     continue
@@ -487,26 +518,26 @@
                 for quantity in self.filter_quantities:
                     if halo.has_key(quantity): haloQuantities[quantity] = halo[quantity]
 
-                self.filtered_halos.append(haloQuantities)
+                only_on_root(self.filtered_halos.append, haloQuantities)
 
             # If we've gotten this far down, this halo is good and we want
             # to keep it. But we need to communicate the recentering changes
             # to all processors (the root one in particular) without having
             # one task clobber the other.
-            updated_halos.append(halo)
-        
+            only_on_root(updated_halos.append, halo)
+
         # And here is where we bring it all together.
         updated_halos = self.comm.par_combine_object(updated_halos,
                             datatype="list", op="cat")
-        updated_halos.sort(key = lambda a:a['id'])
+        updated_halos.sort(key=lambda a:a['id'])
         self.all_halos = updated_halos
 
         self.filtered_halos = self.comm.par_combine_object(self.filtered_halos,
                             datatype="list", op="cat")
-        self.filtered_halos.sort(key = lambda a:a['id'])
+        self.filtered_halos.sort(key=lambda a:a['id'])
 
         if filename is not None:
-            self._write_filtered_halo_list(filename, **kwargs)
+            self._write_filtered_halo_list(filename)
 
     def _get_halo_profile(self, halo, filename, virial_filter=True,
             force_write=False):
@@ -529,31 +560,13 @@
                 return None
 
             # get a sphere object to profile
-            sphere = get_halo_sphere(halo, self.pf, recenter=self.recenter)
+            sphere = self._get_halo_sphere(halo)
             if sphere is None: return None
 
-            if self._need_bulk_velocity:
-                # Set bulk velocity to zero out radial velocity profiles.
-                if self.velocity_center[0] == 'bulk':
-                    if self.velocity_center[1] == 'halo':
-                        sphere.set_field_parameter('bulk_velocity', halo['velocity'])
-                    elif self.velocity_center[1] == 'sphere':
-                        sphere.set_field_parameter('bulk_velocity', 
-                                                   sphere.quantities['BulkVelocity'](lazy_reader=False, 
-                                                                                     preload=False))
-                    else:
-                        mylog.error("Invalid parameter: VelocityCenter.")
-                elif self.velocity_center[0] == 'max':
-                    max_grid, max_cell, max_value, max_location = \
-                        self.pf.h.find_max_cell_location(self.velocity_center[1])
-                    sphere.set_field_parameter('bulk_velocity', [max_grid['x-velocity'][max_cell],
-                                                                 max_grid['y-velocity'][max_cell],
-                                                                 max_grid['z-velocity'][max_cell]])
-
             try:
                 profile = BinnedProfile1D(sphere, self.n_profile_bins, "RadiusMpc",
                                                 r_min, halo['r_max'],
-                                                log_space=True, lazy_reader=False,
+                                                log_space=True, lazy_reader=True,
                                                 end_collect=True)
             except EmptyProfileData:
                 mylog.error("Caught EmptyProfileData exception, returning None for this halo.")
@@ -586,9 +599,75 @@
 
         return profile
 
+    def _get_halo_sphere(self, halo):
+        """
+        Returns a sphere object for a given halo, performs the recentering,
+        and calculates bulk velocities.
+        """
+
+        sphere = self.pf.h.sphere(halo['center'], halo['r_max']/self.pf.units['mpc'])
+        if len(sphere._grids) == 0: return None
+        new_sphere = False
+
+        if self.recenter:
+            old = halo['center']
+            if self.recenter in centering_registry:
+                new_x, new_y, new_z = \
+                    centering_registry[self.recenter](sphere)
+            else:
+                # user supplied function
+                new_x, new_y, new_z = self.recenter(sphere)
+            if new_x < self.pf.domain_left_edge[0] or \
+                    new_y < self.pf.domain_left_edge[1] or \
+                    new_z < self.pf.domain_left_edge[2]:
+                mylog.info("Recentering rejected, skipping halo %d" % \
+                    halo['id'])
+                return None
+            halo['center'] = [new_x, new_y, new_z]
+            d = self.pf['kpc'] * periodic_dist(old, halo['center'],
+                self.pf.domain_right_edge - self.pf.domain_left_edge)
+            mylog.info("Recentered halo %d %1.3e kpc away." % (halo['id'], d))
+            # Expand the halo to account for recentering. 
+            halo['r_max'] += d / 1000. # d is in kpc -> want mpc
+            new_sphere = True
+
+        if new_sphere:
+            # Temporary solution to memory leak.
+            for g in self.pf.h.grids:
+                g.clear_data()
+            sphere.clear_data()
+            del sphere
+            sphere = self.pf.h.sphere(halo['center'], halo['r_max']/self.pf.units['mpc'])
+
+        if self._need_bulk_velocity:
+            # Set bulk velocity to zero out radial velocity profiles.
+            if self.velocity_center[0] == 'bulk':
+                if self.velocity_center[1] == 'halo':
+                    sphere.set_field_parameter('bulk_velocity', halo['velocity'])
+                elif self.velocity_center[1] == 'sphere':
+                    mylog.info('Calculating sphere bulk velocity.')
+                    sphere.set_field_parameter('bulk_velocity', 
+                                               sphere.quantities['BulkVelocity'](lazy_reader=True, 
+                                                                                 preload=False))
+                else:
+                    mylog.error("Invalid parameter: velocity_center.")
+                    return None
+            elif self.velocity_center[0] == 'max':
+                mylog.info('Setting bulk velocity with value at max %s.' % self.velocity_center[1])
+                max_val, maxi, mx, my, mz, mg = sphere.quantities['MaxLocation'](self.velocity_center[1],
+                                                                                 lazy_reader=True)
+                max_grid = self.pf.h.grids[mg]
+                max_cell = na.unravel_index(maxi, max_grid.ActiveDimensions)
+                sphere.set_field_parameter('bulk_velocity', [max_grid['x-velocity'][max_cell],
+                                                             max_grid['y-velocity'][max_cell],
+                                                             max_grid['z-velocity'][max_cell]])
+            mylog.info('Bulk velocity set.')
+
+        return sphere
+
     @parallel_blocking_call
     def make_projections(self, axes=[0, 1, 2], halo_list='filtered',
-            save_images=False, save_cube=True):
+                         save_images=False, save_cube=True, njobs=-1):
         r"""Make projections of all halos using specified fields.
         
         After adding fields using `add_projection`, this starts the actual
@@ -608,6 +687,10 @@
         save_cube : bool
             Whether or not to save the HDF5 files of the halo projections.
             Default=True.
+        njobs : int
+            The number of jobs over which to split the projections.  Set
+            to -1 so that each halo is done by a single processor.
+            Default: -1.
         
         Examples
         --------
@@ -656,7 +739,7 @@
                          self.pf.parameters['DomainRightEdge'][w])
                   for w in range(self.pf.parameters['TopGridRank'])]
 
-        for halo in parallel_objects(halo_projection_list, -1):
+        for halo in parallel_objects(halo_projection_list, njobs=njobs):
             if halo is None:
                 continue
             # Check if region will overlap domain edge.
@@ -745,7 +828,7 @@
 
     @parallel_blocking_call
     def analyze_halo_spheres(self, analysis_function, halo_list='filtered',
-                             analysis_output_dir=None):
+                             analysis_output_dir=None, njobs=-1):
         r"""Perform custom analysis on all halos.
         
         This will loop through all halo on the HaloProfiler's list, 
@@ -768,6 +851,10 @@
         analysis_output_dir : string, optional
             If specified, this directory will be created within the dataset to 
             contain any output from the analysis function.  Default: None.
+        njobs : int
+            The number of jobs over which to split the analysis.  Set
+            to -1 so that each halo is done by a single processor.
+            Default: -1.
 
         Examples
         --------
@@ -803,11 +890,11 @@
                 my_output_dir = "%s/%s" % (self.pf.fullpath, analysis_output_dir)
             self.__check_directory(my_output_dir)
 
-        for halo in parallel_objects(halo_analysis_list, -1):
+        for halo in parallel_objects(halo_analysis_list, njobs=njobs):
             if halo is None: continue
 
             # Get a sphere object to analyze.
-            sphere = get_halo_sphere(halo, self.pf, recenter=self.recenter)
+            sphere = self._get_halo_sphere(halo)
             if sphere is None: continue
 
             # Call the given analysis function.
@@ -1042,94 +1129,6 @@
         else:
             os.mkdir(my_output_dir)
 
-def get_halo_sphere(halo, pf, recenter=None):
-    r"""Returns a sphere object for a given halo.
-        
-    With a dictionary containing halo properties, such as center 
-    and r_max, this creates a sphere object and optionally 
-    recenters and recreates the sphere using a recentering function.
-    This is to be used primarily to make spheres for a set of halos 
-    loaded by the HaloProfiler.
-    
-    Parameters
-    ----------
-    halo : dict, required
-        The dictionary containing halo properties used to make the sphere.
-        Required entries:
-            center : list with center coordinates.
-            r_max : sphere radius in Mpc.
-    pf : parameter file object, required
-        The parameter file from which the sphere will be made.
-    recenter : {None, string or function}
-        The exact location of the sphere center can significantly affect 
-        radial profiles.  The halo center loaded by the HaloProfiler will 
-        typically be the dark matter center of mass calculated by a halo 
-        finder.  However, this may not be the best location for centering 
-        profiles of baryon quantities.  For example, one may want to center 
-        on the maximum density.
-        If recenter is given as a string, one of the existing recentering 
-        functions will be used:
-            Min_Dark_Matter_Density : location of minimum dark matter density
-            Max_Dark_Matter_Density : location of maximum dark matter density
-            CoM_Dark_Matter_Density : dark matter center of mass
-            Min_Gas_Density : location of minimum gas density
-            Max_Gas_Density : location of maximum gas density
-            CoM_Gas_Density : gas center of mass
-            Min_Total_Density : location of minimum total density
-            Max_Total_Density : location of maximum total density
-            CoM_Total_Density : total center of mass
-            Min_Temperature : location of minimum temperature
-            Max_Temperature : location of maximum temperature
-        Alternately, a function can be supplied for custom recentering.
-        The function should take only one argument, a sphere object.
-            Example function:
-                def my_center_of_mass(data):
-                   my_x, my_y, my_z = data.quantities['CenterOfMass']()
-                   return (my_x, my_y, my_z)
-
-        Examples: this should primarily be used with the halo list of the HaloProfiler.
-        This is an example with an abstract halo asssuming a pre-defined pf.
-        >>> halo = {'center': [0.5, 0.5, 0.5], 'r_max': 1.0}
-        >>> my_sphere = get_halo_sphere(halo, pf, recenter='Max_Gas_Density')
-        >>> # Assuming the above example function has been defined.
-        >>> my_sphere = get_halo_sphere(halo, pf, recenter=my_center_of_mass)
-    """
-        
-    sphere = pf.h.sphere(halo['center'], halo['r_max']/pf.units['mpc'])
-    if len(sphere._grids) == 0: return None
-    new_sphere = False
-
-    if recenter:
-        old = halo['center']
-        if recenter in centering_registry:
-            new_x, new_y, new_z = \
-                centering_registry[recenter](sphere)
-        else:
-            # user supplied function
-            new_x, new_y, new_z = recenter(sphere)
-        if new_x < pf.domain_left_edge[0] or \
-                new_y < pf.domain_left_edge[1] or \
-                new_z < pf.domain_left_edge[2]:
-            mylog.info("Recentering rejected, skipping halo %d" % \
-                halo['id'])
-            return None
-        halo['center'] = [new_x, new_y, new_z]
-        d = pf['kpc'] * periodic_dist(old, halo['center'],
-            pf.domain_right_edge - pf.domain_left_edge)
-        mylog.info("Recentered halo %d %1.3e kpc away." % (halo['id'], d))
-        # Expand the halo to account for recentering. 
-        halo['r_max'] += d / 1000 # d is in kpc -> want mpc
-        new_sphere = True
-
-    if new_sphere:
-        # Temporary solution to memory leak.
-        for g in pf.h.grids:
-            g.clear_data()
-        sphere.clear_data()
-        del sphere
-        sphere = pf.h.sphere(halo['center'], halo['r_max']/pf.units['mpc'])
-    return sphere
-
 def _shift_projections(pf, projections, oldCenter, newCenter, axis):
     """
     Shift projection data around.
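
Taken together, the changes in this file expose recenter and a per-call njobs decomposition directly to users. A short usage sketch grounded in the docstrings above; the dataset name, field choices, and output filename are placeholders, and the add_profile/add_projection call signatures are inferred from the methods shown in this diff:

    >>> from yt.analysis_modules.halo_profiler.api import *
    >>> hp = HaloProfiler("RedshiftOutput0005/RD0005",
    ...                   recenter="Max_Gas_Density",
    ...                   velocity_center=["bulk", "sphere"])
    >>> hp.add_profile("CellMassMsun", weight_field=None, accumulation=True)
    >>> hp.add_profile("Temperature", weight_field="CellMassMsun", accumulation=False)
    >>> hp.make_profiles(filename="FilteredHalos.out", njobs=-1)
    >>> hp.add_projection("Density", weight_field=None)
    >>> hp.make_projections(axes=[0, 1, 2], save_cube=True, njobs=-1)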


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -190,7 +190,7 @@
             elif (child._isValid()):
                 these_children.append(child)
             else:
-                print "Eliminating invalid, childless clump with %d cells." % len(child.data["CellMassMsun"])
+                print "Eliminating invalid, childless clump with %d cells." % len(child.data["Ones"])
         if (len(these_children) > 1):
             print "%d of %d children survived." % (len(these_children),len(clump.children))            
             clump.children = these_children
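
The cell count printed above now comes from the "Ones" field, which is defined for any data object (every cell contributes exactly 1) and is cheaper to generate than a mass field; as a counter the two are interchangeable:

    # Number of cells in the clump; "Ones" is an array of 1.0 per cell.
    n_cells = len(child.data["Ones"])
    n_cells = int(child.data["Ones"].sum())  # equivalent, since every entry is 1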


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/analysis_modules/star_analysis/sfr_spectrum.py
--- a/yt/analysis_modules/star_analysis/sfr_spectrum.py
+++ b/yt/analysis_modules/star_analysis/sfr_spectrum.py
@@ -246,7 +246,7 @@
 """
 
 class SpectrumBuilder(object):
-    def __init__(self, pf, bcdir="", model="chabrier"):
+    def __init__(self, pf, bcdir="", model="chabrier", time_now=None):
         r"""Initialize the data to build a summed flux spectrum for a
         collection of stars using the models of Bruzual & Charlot (2003).
         This function loads the necessary data tables into memory and
@@ -280,8 +280,12 @@
              OmegaLambdaNow = self._pf.omega_lambda,
              InitialRedshift = self._pf['CosmologyInitialRedshift'])
         # Find the time right now.
-        self.time_now = self.cosm.ComputeTimeFromRedshift(
-            self._pf.current_redshift) # seconds
+        
+        if time_now is None:
+            self.time_now = self.cosm.ComputeTimeFromRedshift(
+                self._pf.current_redshift) # seconds
+        else:
+            self.time_now = time_now
         
         # Read the tables.
         self.read_bclib()
@@ -404,7 +408,8 @@
         self.star_metal = self.star_metal[sort]
         
         # Interpolate the flux for each star, adding to the total by weight.
-        for star in itertools.izip(Mname, Aindex, ratio1, ratio2, self.star_mass):
+        pbar = get_pbar("Calculating fluxes",len(self.star_mass))
+        for i,star in enumerate(itertools.izip(Mname, Aindex, ratio1, ratio2, self.star_mass)):
             # Pick the right age bin for the right flux array.
             flux = self.flux[star[0]][star[1],:]
             # Get the one just before the one above.
@@ -413,6 +418,9 @@
             int_flux = star[3] * na.log10(flux_1) + star[2] * na.log10(flux)
             # Add this flux to the total, weighted by mass.
             self.final_spec += na.power(10., int_flux) * star[4]
+            pbar.update(i)
+        pbar.finish()    
+        
         # Normalize.
         self.total_mass = na.sum(self.star_mass)
         self.avg_mass = na.mean(self.star_mass)
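
The new time_now keyword lets callers bypass the redshift-to-time conversion entirely, which is useful when current_redshift is not meaningful for the dataset. A hedged sketch; the bcdir path and the numeric value are placeholders, and the units are seconds as in the code above:

    >>> sb = SpectrumBuilder(pf, bcdir="bc03", model="chabrier",
    ...                      time_now=4.3e17)  # seconds; skips ComputeTimeFromRedshift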


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/analysis_modules/sunrise_export/sunrise_exporter.py
--- a/yt/analysis_modules/sunrise_export/sunrise_exporter.py
+++ b/yt/analysis_modules/sunrise_export/sunrise_exporter.py
@@ -1,6 +1,8 @@
 """
 Code to export from yt to Sunrise
 
+Author: Chris Moody <juxtaposicion at gmail.com>
+Affiliation: UCSC
 Author: Matthew Turk <matthewturk at gmail.com>
 Affiliation: UCSD
 Homepage: http://yt-project.org/
@@ -26,8 +28,7 @@
 
 try:
     import pyfits
-except ImportError:
-    # We silently fail here
+except ImportError: 
     pass
 
 import time
@@ -36,9 +37,11 @@
 from yt.funcs import *
 import yt.utilities.amr_utils as amr_utils
 from yt.data_objects.universal_fields import add_field
+from yt.mods import *
 
-def export_to_sunrise(pf, fn, write_particles = True, subregion_bounds = None,
-    particle_mass=None, particle_pos=None, particle_age=None, particle_metal=None):
+debug = True
+
+def export_to_sunrise(pf, fn, star_particle_type, dle, dre,**kwargs):
     r"""Convert the contents of a dataset to a FITS file format that Sunrise
     understands.
 
@@ -54,18 +57,13 @@
     pf : `StaticOutput`
         The parameter file to convert.
     fn : string
-        The filename of the FITS file.
-    write_particles : bool or pyfits.ColDefs instance, default is True
-        Whether to write out the star particles or not.  If this variable is an
-        instance of pyfits.ColDefs, then this will be used to create a pyfits
-        table named PARTICLEDATA which will be appended.  If this is true, the
-        routine will attempt to create this table from hand.
-    subregion_bounds : list of tuples
-        This is a list of tuples describing the subregion of the top grid to
-        export.  This will only work when only *one* root grid exists.
-        It is of the format:
-        [ (start_index_x, nx), (start_index_y, ny), (start_index_z, nz) ]
-        where nx, ny, nz are the number of cells to extract.
+        The filename of the output FITS file.
+    dle : The domain left edge to extract
+    dre : The domain right edge to extract
+        Array format is (nx,ny,nz) where each element is a floating point
+        value in unitary position units, where 0 is the leftmost edge and 1
+        the rightmost.
+        
 
     Notes
     -----
@@ -74,144 +72,250 @@
     http://sunrise.googlecode.com/ for more information.
 
     """
-    # Now particles
-    #  output_file->addTable("PARTICLEDATA" , 0);
-    # addKey("timeunit", time_unit, "Time unit is "+time_unit);
-    # addKey("tempunit", temp_unit, "Temperature unit is "+temp_unit);
-    # 
-    # addColumn(Tint, "ID", 1, "" );
-    # addColumn(Tdouble, "position", 3, length_unit );
-    # addColumn(Tdouble, "stellar_radius", 1, length_unit );
-    # addColumn(Tdouble, "L_bol", 1, L_bol_unit );
-    # addColumn(Tdouble, "mass_stars", 1, mass_unit );
-    # addColumn(Tdouble, "mass_stellar_metals", 1, mass_unit );
-    # addColumn(Tdouble, "age_m", 1, time_unit+"*"+mass_unit );
-    # addColumn(Tdouble, "age_l", 1, time_unit+"*"+mass_unit );
-    # addColumn(Tfloat, "L_lambda", L_lambda.columns(), 
-    #			L_lambda_unit );
-    #	output->addKey("logflux", true, "Column L_lambda values are log (L_lambda)");
+    
+    #we must round the dle,dre to the nearest root grid cells
+    ile,ire,super_level= round_nearest_edge(pf,dle,dre)
+    super_level -= 1 #we're off by one (so we don't need a correction if we span 2 cells)
+    fle,fre = ile*1.0/pf.domain_dimensions, ire*1.0/pf.domain_dimensions
+    mylog.info("rounding specified region:")
+    mylog.info("from [%1.5f %1.5f %1.5f]-[%1.5f %1.5f %1.5f]"%(tuple(dle)+tuple(dre)))
+    mylog.info("to   [%07i %07i %07i]-[%07i %07i %07i]"%(tuple(ile)+tuple(ire)))
+    mylog.info("to   [%1.5f %1.5f %1.5f]-[%1.5f %1.5f %1.5f]"%(tuple(fle)+tuple(fre)))
 
-    col_list = []
-    if subregion_bounds == None:    
-        DLE, DRE = pf.domain_left_edge, pf.domain_right_edge
-        DX = pf.domain_dimensions
-    else:
-        DLE, DX = zip(*subregion_bounds)
-        DLE, DX = na.array(DLE), na.array(DX)
-        DRE = DLE + DX
-    reg = pf.h.region((DRE+DLE)/2.0, DLE, DRE)
 
-    if write_particles is True:
-        pi = reg["particle_type"] == 2
-        pos = na.array([reg["particle_position_%s" % ax][pi]*pf['kpc']
-                            for ax in 'xyz']).transpose()
-        vel = na.array([reg["particle_velocity_%s" % ax][pi]
-                            for ax in 'xyz']).transpose()
-        # Velocity is cm/s, we want it to be kpc/yr
-        vel *= (pf["kpc"]/pf["cm"]) / (365*24*3400.)
-        age = pf["years"] * (pf.current_time - reg["creation_time"][pi])
-        creation_time = reg["creation_time"][pi] * pf["years"]
+    #Create the refinement hilbert octree in GRIDSTRUCTURE
+    #For every leaf (not-refined) cell we have a column in GRIDDATA
+    #Include mass_gas, mass_metals, gas_temp_m, gas_teff_m, cell_volume, SFR
+    #Since the octree always starts with one cell, and our 0-level mesh
+    #may have many cells, we must create the octree region sitting
+    #on top of the first mesh by providing a negative level
+    output, refinement = prepare_octree(pf,ile,start_level=-super_level)
 
-        initial_mass = reg["InitialMassCenOstriker"][pi]
-        current_mass = reg["ParticleMassMsun"][pi]
-        col_list.append(pyfits.Column("ID", format="I", array=na.arange(current_mass.size)))
-        col_list.append(pyfits.Column("parent_ID", format="I", array=na.arange(current_mass.size)))
-        col_list.append(pyfits.Column("position", format="3D", array=pos, unit="kpc"))
-        col_list.append(pyfits.Column("velocity", format="3D", array=vel, unit="kpc/yr"))
-        col_list.append(pyfits.Column("creation_mass", format="D", array=initial_mass, unit="Msun"))
-        col_list.append(pyfits.Column("formation_time", format="D", array=creation_time, unit="yr"))
-        col_list.append(pyfits.Column("mass", format="D", array=current_mass, unit="Msun"))
-        col_list.append(pyfits.Column("age_m", format="D", array=age))
-        col_list.append(pyfits.Column("age_l", format="D", array=age))
-        #For particles, Sunrise takes 
-        #the dimensionless metallicity, not the mass of the metals
-        col_list.append(pyfits.Column("metallicity", format="D",
-            array=reg["metallicity_fraction"][pi],unit="Msun")) # wrong?
-        col_list.append(pyfits.Column("L_bol", format="D",
-            array=na.zeros(particle_mass.size)))
+    #Create a list of the star particle properties in PARTICLE_DATA
+    #Include ID, parent-ID, position, velocity, creation_mass, 
+    #formation_time, mass, age_m, age_l, metallicity, L_bol
+    particle_data = prepare_star_particles(pf,star_particle_type,fle=fle,fre=fre,**kwargs)
 
-        cols = pyfits.ColDefs(col_list)
-        pd_table = pyfits.new_table(cols)
-        pd_table.name = "PARTICLEDATA"
-    elif isinstance(write_particles, pyfits.ColDefs):
-        pd_table = pyfits.new_table(write_particles)
-        pd_table.name = "PARTICLEDATA"
-        write_particles = True
+    create_fits_file(pf,fn, refinement,output,particle_data,fre,fle)
 
-    def _MetalMass(field, data):
-        return data["Metal_Density"] * data["CellVolume"]
-        
-    def _convMetalMass(data):
-        return 1.0/1.989e33
-        
-    add_field("MetalMass", function=_MetalMass,
-              convert_function=_convMetalMass)
+def prepare_octree(pf,ile,start_level=0):
+    add_fields() #add the metal mass field that sunrise wants
+    fields = ["CellMassMsun","TemperatureTimesCellMassMsun", 
+              "MetalMass","CellVolumeCode"]
+    
+    #gather the field data from octs
+    pbar = get_pbar("Retrieving field data",len(fields))
+    field_data = [] 
+    dd = pf.h.all_data()
+    for fi,f in enumerate(fields):
+        field_data += dd[f],
+        pbar.update(fi)
+    pbar.finish()
+    del field_data
 
-    output, refined = generate_flat_octree(pf,
-            ["CellMassMsun","TemperatureTimesCellMassMsun", "MetalMass",
-             "CellVolumeCode"], subregion_bounds = subregion_bounds)
-    cvcgs = output["CellVolumeCode"].astype('float64') * pf['cm']**3.0
+    #first we cast every cell as an oct
+    #ngrids = na.max([g.id for g in pf._grids])
+    grids = {}
+    levels_all = {} 
+    levels_finest = {}
+    for l in range(100): 
+        levels_finest[l]=0
+        levels_all[l]=0
+    pbar = get_pbar("Initializing octs ",len(pf.h.grids))
+    for gi,g in enumerate(pf.h.grids):
+        ff = na.array([g[f] for f in fields])
+        og = amr_utils.OctreeGrid(
+                g.child_index_mask.astype('int32'),
+                ff.astype("float64"),
+                g.LeftEdge.astype("float64"),
+                g.ActiveDimensions.astype("int32"),
+                na.ones(1,dtype="float64")*g.dds[0],
+                g.Level,
+                g.id)
+        grids[g.id] = og
+        #how many refinement cells will we have?
+        #measure the 'volume' of each mesh, but many
+        #cells do not exist, so this is an overestimate
+        levels_all[g.Level] += g.ActiveDimensions.prod()
+        #how many leaves do we have?
+        #this overestimates. a child of -1 means no child,
+        #but that cell may still be expanded on a submesh because
+        #(at least in ART) the meshes are inefficient.
+        g.clear_data()
+        pbar.update(gi)
+    pbar.finish()
+    
+    #create the octree grid list
+    oct_list =  amr_utils.OctreeGridList(grids)
+    
+    #initialize arrays to be passed to the recursion algo
+    o_length = na.sum(levels_all.values())
+    r_length = na.sum(levels_all.values())
+    output   = na.zeros((o_length,len(fields)), dtype='float64')
+    refined  = na.zeros(r_length, dtype='int32')
+    levels   = na.zeros(r_length, dtype='int32')
+    pos = position()
+    hs       = hilbert_state()
+    refined[0] = 1 #introduce the first cell as divided
+    levels[0]  = start_level-1 #introduce the first cell as divided
+    pos.refined_pos += 1
+    RecurseOctreeDepthFirstHilbert(
+            ile[0],ile[1],ile[2],
+            pos,0, hs, 
+            output,refined,levels,
+            grids,
+            start_level,
+            #physical_center = (ile)*1.0/pf.domain_dimensions*pf['kpc'],
+            physical_center = ile,
+            #physical_width  = pf['kpc'])
+            physical_width  = pf.domain_dimensions)
+    #by the time we get here the 'current' position is actually
+    #for the next spot, so we're off by 1
+    print 'refinement tree # of cells %i, # of leaves %i'%(pos.refined_pos,pos.output_pos) 
+    output  = output[:pos.output_pos]
+    refined = refined[:pos.refined_pos] 
+    levels = levels[:pos.refined_pos] 
+    return output,refined
 
-    # First the structure
+def print_row(level,ple,pre,pw,pc,hs):
+    print level, 
+    print '%1.5f %1.5f %1.5f '%tuple(ple*pw-pc),
+    print '%1.5f %1.5f %1.5f '%tuple(pre*pw-pc),
+    print hs.dim, hs.sgn
+
+def print_child(level,grid,i,j,k,pw,pc):
+    ple = (grid.left_edges+na.array([i,j,k])*grid.dx)*pw-pc #parent LE 
+    pre = (grid.left_edges+na.array([i+1,j+1,k+1])*grid.dx)*pw-pc #parent RE 
+    print level, 
+    print '%1.5f %1.5f %1.5f '%tuple(ple),
+    print '%1.5f %1.5f %1.5f '%tuple(pre)
+
+def RecurseOctreeDepthFirstHilbert(xi,yi,zi,
+                            curpos, gi, 
+                            hs,
+                            output,
+                            refined,
+                            levels,
+                            grids,
+                            level,
+                            physical_center=None,
+                            physical_width=None,
+                            printr=False):
+    grid = grids[gi]
+    m = 2**(-level-1) if level < 0 else 1
+    ple = grid.left_edges+na.array([xi,yi,zi])*grid.dx #parent LE
+    pre = ple+grid.dx*m
+    if printr:
+        print_row(level,ple,pre,physical_width,physical_center,hs)
+
+    #here we go over the 8 octants
+    #in general however, a mesh cell on this level
+    #may have more than 8 children on the next level
+    #so we find the int float center (cxyz) of each child cell
+    # and from that find the child cell indices
+    for iv, (vertex,hs_child) in enumerate(hs):
+        #print ' '*(level+3), level,iv, vertex,curpos.refined_pos,curpos.output_pos,
+        #negative level indicates that we need to build a super-octree
+        if level < 0: 
+            #print ' '
+            #we are not on the root grid yet, but this is 
+            #how many equivalent root grid cells we would have
+            #level -1 means our oct grid's children are the same size
+            #as the root grid (hence the -level-1)
+            dx = 2**(-level-1) #this is the child width 
+            i,j,k = xi+vertex[0]*dx,yi+vertex[1]*dx,zi+vertex[2]*dx
+            #we always refine the negative levels
+            refined[curpos.refined_pos] = 1
+            levels[curpos.refined_pos] = level
+            curpos.refined_pos += 1
+            RecurseOctreeDepthFirstHilbert(i, j, k,
+                                curpos, 0, hs_child, output, refined, levels, grids,
+                                level+1,
+                                physical_center=physical_center,
+                                physical_width=physical_width,)
+        else:
+            i,j,k = xi+vertex[0],yi+vertex[1],zi+vertex[2]
+            ci = grid.child_indices[i,j,k] #is this oct subdivided?
+            if ci == -1:
+                for fi in range(grid.fields.shape[0]):
+                    output[curpos.output_pos,fi] = grid.fields[fi,i,j,k]
+                refined[curpos.refined_pos] = 0
+                levels[curpos.refined_pos] = level
+                curpos.output_pos += 1 #position updated after write
+                curpos.refined_pos += 1
+                if printr:
+                    print_child(level+1,grid,i,j,k,physical_width,physical_center)
+            else:
+                cx = (grid.left_edges[0] + i*grid.dx[0]) #floating le of the child
+                cy = (grid.left_edges[1] + j*grid.dx[0])
+                cz = (grid.left_edges[2] + k*grid.dx[0])
+                refined[curpos.refined_pos] = 1
+                levels[curpos.refined_pos] = level
+                curpos.refined_pos += 1 #position updated after write
+                child_grid = grids[ci]
+                child_dx = child_grid.dx[0]
+                child_leftedges = child_grid.left_edges
+                child_i = int((cx - child_leftedges[0])/child_dx)
+                child_j = int((cy - child_leftedges[1])/child_dx)
+                child_k = int((cz - child_leftedges[2])/child_dx)
+                RecurseOctreeDepthFirstHilbert(child_i, child_j, child_k,
+                                    curpos, ci, hs_child, output, refined, levels, grids,
+                                    level+1,
+                                    physical_center=physical_center,
+                                    physical_width=physical_width)
+
+def create_fits_file(pf,fn, refined,output,particle_data,fre,fle):
+
+    #first create the grid structure
     structure = pyfits.Column("structure", format="B", array=refined.astype("bool"))
     cols = pyfits.ColDefs([structure])
     st_table = pyfits.new_table(cols)
     st_table.name = "GRIDSTRUCTURE"
+    st_table.header.update("hierarch lengthunit", "kpc", comment="Length unit for grid")
+    fdx = fre-fle
+    for i,a in enumerate('xyz'):
+        st_table.header.update("min%s" % a, fle[i] * pf['kpc'])
+        st_table.header.update("max%s" % a, fre[i] * pf['kpc'])
+        #st_table.header.update("min%s" % a, 0) #WARNING: this is for debugging
+        #st_table.header.update("max%s" % a, 2) #
+        st_table.header.update("n%s" % a, fdx[i])
+        st_table.header.update("subdiv%s" % a, 2)
+    st_table.header.update("subdivtp", "OCTREE", "Type of grid subdivision")
 
-    # Now we update our table with units
-    # ("lengthunit", length_unit, "Length unit for grid");
-    # ("minx", getmin () [0], length_unit_comment);
-    # ("miny", getmin () [1], length_unit_comment);
-    # ("minz", getmin () [2], length_unit_comment);
-    # ("maxx", getmax () [0], length_unit_comment);
-    # ("maxy", getmax () [1], length_unit_comment);
-    # ("maxz", getmax () [2], length_unit_comment);
-    # ("nx", g_.getn () [0], "");
-    # ("ny", g_.getn () [1], "");
-    # ("nz", g_.getn () [2], "");
-    # ("subdivtp", subdivtp, "Type of grid subdivision");
-    # ("subdivx", sub_div[0], "");
-    # ("subdivy", sub_div[1], "");
-    # ("subdivz", sub_div[2], "");
-
-    st_table.header.update("hierarch lengthunit", "kpc", comment="Length unit for grid")
-    for i,a in enumerate('xyz'):
-        st_table.header.update("min%s" % a, DLE[i] * pf['kpc']/pf.domain_dimensions[i])
-        st_table.header.update("max%s" % a, DRE[i] * pf['kpc']/pf.domain_dimensions[i])
-        st_table.header.update("n%s" % a, DX[i])
-        st_table.header.update("subdiv%s" % a, 2)
-    st_table.header.update("subdivtp", "UNIFORM", "Type of grid subdivision")
-
-    # Now grid data itself
-    # ("M_g_tot", total_quantities.m_g(), "[" + mass_unit +
-    #         "] Total gas mass in all cells");
-    # ("SFR_tot", total_quantities.SFR, "[" + SFR_unit +
-    #         "] Total star formation rate of all cells");
-    # ("timeunit", time_unit, "Time unit is "+time_unit);
-    # ("tempunit", temp_unit, "Temperature unit is "+time_unit);
-
-    # (Tdouble, "mass_gas", 1, mass_unit );
-    # (Tdouble, "SFR", 1, SFR_unit );
-    # (Tdouble, "mass_metals", 1, mass_unit );
-    # (Tdouble, "gas_temp_m", 1, temp_unit+"*"+mass_unit );
-    # (Tdouble, "gas_teff_m", 1, temp_unit+"*"+mass_unit );
-    # (Tdouble, "cell_volume", 1, length_unit + "^3" );
-
+    #now the hydro grid data
+    fields = ["CellMassMsun","TemperatureTimesCellMassMsun", 
+              "MetalMass","CellVolumeCode"]
+    fd = {}
+    for i,f in enumerate(fields): 
+        fd[f]=output[:,i]
+    del output
     col_list = []
-    size = output["CellMassMsun"].size
-    tm = output["CellMassMsun"].sum()
+    size = fd["CellMassMsun"].size
+    tm = fd["CellMassMsun"].sum()
     col_list.append(pyfits.Column("mass_gas", format='D',
-                    array=output.pop('CellMassMsun'), unit="Msun"))
+                    array=fd['CellMassMsun'], unit="Msun"))
     col_list.append(pyfits.Column("mass_metals", format='D',
-                    array=output.pop('MetalMass'), unit="Msun"))
+                    array=fd['MetalMass'], unit="Msun"))
+    # col_list.append(pyfits.Column("mass_stars", format='D',
+    #                 array=na.zeros(size,dtype='D'),unit="Msun"))
+    # col_list.append(pyfits.Column("mass_stellar_metals", format='D',
+    #                 array=na.zeros(size,dtype='D'),unit="Msun"))
+    # col_list.append(pyfits.Column("age_m", format='D',
+    #                 array=na.zeros(size,dtype='D'),unit="yr*Msun"))
+    # col_list.append(pyfits.Column("age_l", format='D',
+    #                 array=na.zeros(size,dtype='D'),unit="yr*Msun"))
+    # col_list.append(pyfits.Column("L_bol", format='D',
+    #                 array=na.zeros(size,dtype='D')))
+    # col_list.append(pyfits.Column("L_lambda", format='D',
+    #                 array=na.zeros(size,dtype='D')))
     # The units for gas_temp are really K*Msun. For older Sunrise versions
     # you must set the unit to just K  
     col_list.append(pyfits.Column("gas_temp_m", format='D',
-                    array=output['TemperatureTimesCellMassMsun'], unit="K*Msun"))
+                    array=fd['TemperatureTimesCellMassMsun'], unit="K*Msun"))
     col_list.append(pyfits.Column("gas_teff_m", format='D',
-                    array=output.pop('TemperatureTimesCellMassMsun'), unit="K*Msun"))
+                    array=fd['TemperatureTimesCellMassMsun'], unit="K*Msun"))
     col_list.append(pyfits.Column("cell_volume", format='D',
-                    array=output.pop('CellVolumeCode').astype('float64')*pf['kpc']**3.0,
+                    array=fd['CellVolumeCode'].astype('float64')*pf['kpc']**3.0,
                     unit="kpc^3"))
     col_list.append(pyfits.Column("SFR", format='D',
                     array=na.zeros(size, dtype='D')))
@@ -229,101 +333,216 @@
     md_table.header.update("snaptime", pf.current_time*pf['years'])
     md_table.name = "YT"
 
-    hls = [pyfits.PrimaryHDU(), st_table, mg_table,md_table]
-    if write_particles: hls.append(pd_table)
+    phdu = pyfits.PrimaryHDU()
+    phdu.header.update('nbodycod','yt')
+    hls = [phdu, st_table, mg_table,md_table]
+    hls.append(particle_data)
     hdus = pyfits.HDUList(hls)
     hdus.writeto(fn, clobber=True)
 
-def initialize_octree_list(pf, fields):
-    o_length = r_length = 0
-    grids = []
-    levels_finest, levels_all = defaultdict(lambda: 0), defaultdict(lambda: 0)
-    for g in pf.h.grids:
-        ff = na.array([g[f] for f in fields])
-        grids.append(amr_utils.OctreeGrid(
-                        g.child_index_mask.astype('int32'),
-                        ff.astype("float64"),
-                        g.LeftEdge.astype('float64'),
-                        g.ActiveDimensions.astype('int32'),
-                        na.ones(1,dtype='float64') * g.dds[0], g.Level,
-                        g._id_offset))
-        levels_all[g.Level] += g.ActiveDimensions.prod()
-        levels_finest[g.Level] += g.child_mask.ravel().sum()
-        g.clear_data()
-    ogl = amr_utils.OctreeGridList(grids)
-    return ogl, levels_finest, levels_all
+def nearest_power(x):
+    #round up to the next power of 2
+    x-=1
+    x |= x >> 1
+    x |= x >> 2 
+    x |= x >> 4
+    x |= x >> 8
+    x |= x >> 16
+    x+=1 
+    return x
 
-def generate_flat_octree(pf, fields, subregion_bounds = None):
-    """
-    Generates two arrays, one of the actual values in a depth-first flat
-    octree array, and the other of the values describing the refinement.
-    This allows for export to a code that understands this.  *field* is the
-    field used in the data array.
-    """
-    fields = ensure_list(fields)
-    ogl, levels_finest, levels_all = initialize_octree_list(pf, fields)
-    o_length = na.sum(levels_finest.values())
-    r_length = na.sum(levels_all.values())
-    output = na.zeros((o_length,len(fields)), dtype='float64')
-    refined = na.zeros(r_length, dtype='int32')
-    position = amr_utils.position()
-    if subregion_bounds is None:
-        sx, sy, sz = 0, 0, 0
-        nx, ny, nz = ogl[0].dimensions
-    else:
-        ss, ns = zip(*subregion_bounds)
-        sx, sy, sz = ss
-        nx, ny, nz = ns
-    print "Running from %s for %s cells" % (
-            (sx,sy,sz), (nx,ny,nz))
-    t1 = time.time()
-    amr_utils.RecurseOctreeDepthFirst(
-               sx, sy, sz, nx, ny, nz,
-               position, 0,
-               output, refined, ogl)
-    t2 = time.time()
-    print "Finished.  Took %0.3e seconds." % (t2-t1)
-    dd = {}
-    for i, field in enumerate(fields):
-        dd[field] = output[:position.output_pos,i]
-    return dd, refined[:position.refined_pos]
+def round_nearest_edge(pf,dle,dre):
+    dds = pf.domain_dimensions
+    ile = na.floor(dle*dds).astype('int')
+    ire = na.ceil(dre*dds).astype('int') 
+    
+    #this is the number of cells the super octree needs to expand to
+    #must round to the nearest power of 2
+    width = na.max(ire-ile)
+    width = nearest_power(width)
+    
+    maxlevel = na.rint(na.log2(width)).astype('int')
+    return ile,ire,maxlevel
 
-def generate_levels_octree(pf, fields):
-    fields = ensure_list(fields) + ["Ones", "Ones"]
-    ogl, levels_finest, levels_all = initialize_octree_list(pf, fields)
-    o_length = na.sum(levels_finest.values())
-    r_length = na.sum(levels_all.values())
-    output = na.zeros((r_length,len(fields)), dtype='float64')
-    genealogy = na.zeros((r_length, 3), dtype='int64') - 1 # init to -1
-    corners = na.zeros((r_length, 3), dtype='float64')
-    position = na.add.accumulate(
-                na.array([0] + [levels_all[v] for v in
-                    sorted(levels_all)[:-1]], dtype='int64'), dtype="int64")
-    pp = position.copy()
-    amr_utils.RecurseOctreeByLevels(0, 0, 0,
-               ogl[0].dimensions[0],
-               ogl[0].dimensions[1],
-               ogl[0].dimensions[2],
-               position.astype('int64'), 1,
-               output, genealogy, corners, ogl)
-    return output, genealogy, levels_all, levels_finest, pp, corners
+def prepare_star_particles(pf,star_type,pos=None,vel=None, age=None,
+                          creation_time=None,initial_mass=None,
+                          current_mass=None,metallicity=None,
+                          radius = None,
+                          fle=[0.,0.,0.],fre=[1.,1.,1.]):
+    dd = pf.h.all_data()
+    idx = dd["particle_type"] == star_type
+    if pos is None:
+        pos = na.array([dd["particle_position_%s" % ax]
+                        for ax in 'xyz']).transpose()
+    idx = idx & na.all(pos>fle,axis=1) & na.all(pos<fre,axis=1)
+    pos = pos[idx]*pf['kpc'] #unitary units -> kpc
+    if age is None:
+        age = dd["particle_age"][idx]*pf['years'] # seconds->years
+    if vel is None:
+        vel = na.array([dd["particle_velocity_%s" % ax][idx]
+                        for ax in 'xyz']).transpose()
+        # Velocity is cm/s, we want it to be kpc/yr
+        #vel *= (pf["kpc"]/pf["cm"]) / (365*24*3600.)
+        vel *= 1.02268944e-14 
+    if initial_mass is None:
+        #in solar masses
+        initial_mass = dd["particle_mass_initial"][idx]*pf['Msun']
+    if current_mass is None:
+        #in solar masses
+        current_mass = dd["particle_mass"][idx]*pf['Msun']
+    if metallicity is None:
+        #this should be in dimensionless units, metals mass / particle mass
+        metallicity = dd["particle_metallicity"][idx]
+    if radius is None:
+        radius = initial_mass*0.0+10.0/1000.0 #10pc radius
 
-def _initial_mass_cen_ostriker(field, data):
-    # SFR in a cell. This assumes stars were created by the Cen & Ostriker algorithm
-    # Check Grid_AddToDiskProfile.C and star_maker7.src
-    star_mass_ejection_fraction = data.pf.get_parameter("StarMassEjectionFraction",float)
-    star_maker_minimum_dynamical_time = 3e6 # years, which will get divided out
-    dtForSFR = star_maker_minimum_dynamical_time / data.pf["years"]
-    xv1 = ((data.pf["InitialTime"] - data["creation_time"])
-            / data["dynamical_time"])
-    xv2 = ((data.pf["InitialTime"] + dtForSFR - data["creation_time"])
-            / data["dynamical_time"])
-    denom = (1.0 - star_mass_ejection_fraction * (1.0 - (1.0 + xv1)*na.exp(-xv1)))
-    minitial = data["ParticleMassMsun"] / denom
-    return minitial
-add_field("InitialMassCenOstriker", function=_initial_mass_cen_ostriker)
+    formation_time = pf.current_time-age
+    #create every column
+    col_list = []
+    col_list.append(pyfits.Column("ID", format="I", array=na.arange(current_mass.size)))
+    col_list.append(pyfits.Column("parent_ID", format="I", array=na.arange(current_mass.size)))
+    col_list.append(pyfits.Column("position", format="3D", array=pos, unit="kpc"))
+    col_list.append(pyfits.Column("velocity", format="3D", array=vel, unit="kpc/yr"))
+    col_list.append(pyfits.Column("creation_mass", format="D", array=initial_mass, unit="Msun"))
+    col_list.append(pyfits.Column("formation_time", format="D", array=formation_time, unit="yr"))
+    col_list.append(pyfits.Column("radius", format="D", array=radius, unit="kpc"))
+    col_list.append(pyfits.Column("mass", format="D", array=current_mass, unit="Msun"))
+    col_list.append(pyfits.Column("age_m", format="D", array=age))
+    col_list.append(pyfits.Column("age_l", format="D", array=age))
+    #For particles, Sunrise takes 
+    #the dimensionless metallicity, not the mass of the metals
+    col_list.append(pyfits.Column("metallicity", format="D",
+        array=metallicity))
+    col_list.append(pyfits.Column("L_bol", format="D",
+        array=na.zeros(current_mass.size)))
+    
+    #make the table
+    cols = pyfits.ColDefs(col_list)
+    pd_table = pyfits.new_table(cols)
+    pd_table.name = "PARTICLEDATA"
+    return pd_table
 
-def _temp_times_mass(field, data):
-    return data["Temperature"]*data["CellMassMsun"]
-add_field("TemperatureTimesCellMassMsun", function=_temp_times_mass)
 
+def add_fields():
+    """Add three Eulerian fields Sunrise uses"""
+    def _MetalMass(field, data):
+        return data["Metal_Density"] * data["CellVolume"]
+        
+    def _convMetalMass(data):
+        return 1.0/1.989e33
+    
+    add_field("MetalMass", function=_MetalMass,
+              convert_function=_convMetalMass)
+
+    def _initial_mass_cen_ostriker(field, data):
+        # SFR in a cell. This assumes stars were created by the Cen & Ostriker algorithm
+        # Check Grid_AddToDiskProfile.C and star_maker7.src
+        star_mass_ejection_fraction = data.pf.get_parameter("StarMassEjectionFraction",float)
+        star_maker_minimum_dynamical_time = 3e6 # years, which will get divided out
+        dtForSFR = star_maker_minimum_dynamical_time / data.pf["years"]
+        xv1 = ((data.pf["InitialTime"] - data["creation_time"])
+                / data["dynamical_time"])
+        xv2 = ((data.pf["InitialTime"] + dtForSFR - data["creation_time"])
+                / data["dynamical_time"])
+        denom = (1.0 - star_mass_ejection_fraction * (1.0 - (1.0 + xv1)*na.exp(-xv1)))
+        minitial = data["ParticleMassMsun"] / denom
+        return minitial
+
+    add_field("InitialMassCenOstriker", function=_initial_mass_cen_ostriker)
+
+    def _temp_times_mass(field, data):
+        return data["Temperature"]*data["CellMassMsun"]
+    add_field("TemperatureTimesCellMassMsun", function=_temp_times_mass)
+
+class position:
+    def __init__(self):
+        self.output_pos = 0
+        self.refined_pos = 0
+
+class hilbert_state():
+    def __init__(self,dim=None,sgn=None,octant=None):
+        if dim is None: dim = [0,1,2]
+        if sgn is None: sgn = [1,1,1]
+        if octant is None: octant = 5
+        self.dim = dim
+        self.sgn = sgn
+        self.octant = octant
+    def flip(self,i):
+        self.sgn[i]*=-1
+    def swap(self,i,j):
+        temp = self.dim[i]
+        self.dim[i]=self.dim[j]
+        self.dim[j]=temp
+        axis = self.sgn[i]
+        self.sgn[i] = self.sgn[j]
+        self.sgn[j] = axis
+    def reorder(self,i,j,k):
+        ndim = [self.dim[i],self.dim[j],self.dim[k]] 
+        nsgn = [self.sgn[i],self.sgn[j],self.sgn[k]]
+        self.dim = ndim
+        self.sgn = nsgn
+    def copy(self):
+        return hilbert_state([self.dim[0],self.dim[1],self.dim[2]],
+                             [self.sgn[0],self.sgn[1],self.sgn[2]],
+                             self.octant)
+    def descend(self,o):
+        child = self.copy()
+        child.octant = o
+        if o==0:
+            child.swap(0,2)
+        elif o==1:
+            child.swap(1,2)
+        elif o==2:
+            pass
+        elif o==3:
+            child.flip(0)
+            child.flip(2)
+            child.reorder(2,0,1)
+        elif o==4:
+            child.flip(0)
+            child.flip(1)
+            child.reorder(2,0,1)
+        elif o==5:
+            pass
+        elif o==6:
+            child.flip(1)
+            child.flip(2)
+            child.swap(1,2)
+        elif o==7:
+            child.flip(0)
+            child.flip(2)
+            child.swap(0,2)
+        return child
+
+    def __iter__(self):
+        vertex = [0,0,0]
+        j=0
+        for i in range(3):
+            vertex[self.dim[i]] = 0 if self.sgn[i]>0 else 1
+        yield vertex, self.descend(j)
+        vertex[self.dim[0]] += self.sgn[0]
+        j+=1
+        yield vertex, self.descend(j)
+        vertex[self.dim[1]] += self.sgn[1] 
+        j+=1
+        yield vertex, self.descend(j)
+        vertex[self.dim[0]] -= self.sgn[0] 
+        j+=1
+        yield vertex, self.descend(j)
+        vertex[self.dim[2]] += self.sgn[2] 
+        j+=1
+        yield vertex, self.descend(j)
+        vertex[self.dim[0]] += self.sgn[0] 
+        j+=1
+        yield vertex, self.descend(j)
+        vertex[self.dim[1]] -= self.sgn[1] 
+        j+=1
+        yield vertex, self.descend(j)
+        vertex[self.dim[0]] -= self.sgn[0] 
+        j+=1
+        yield vertex, self.descend(j)
+
+
+
+
+


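The hilbert_state class above drives the oct traversal for the Sunrise exporter: descend() returns the child orientation for each of the eight octants, and __iter__ yields the eight child vertices in Hilbert order, so consecutive vertices differ in exactly one coordinate. A minimal sketch of that behaviour, assuming the class has been copied into a hypothetical local module named sunrise_hilbert:

from sunrise_hilbert import hilbert_state  # hypothetical module holding the class above

prev = None
for vertex, child in hilbert_state():
    v = tuple(vertex)       # copy it; the generator mutates the same list in place
    if prev is not None:
        # Gray-code property of the Hilbert walk: one coordinate flips per step
        assert sum(a != b for a, b in zip(prev, v)) == 1
    print v, child.octant   # visits (0,0,0), (1,0,0), (1,1,0), ... through (0,0,1)
    prev = v
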
diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/arraytypes.py
--- a/yt/arraytypes.py
+++ b/yt/arraytypes.py
@@ -43,14 +43,3 @@
     for atype in desc['formats']:
         blanks.append(na.zeros(elements, dtype=atype))
     return rec.fromarrays(blanks, **desc)
-
-class YTArrayHandler(object):
-    def __getattr__(self, name):
-        try:
-            return object.__getattribute__(self, name)
-        except AttributeError:
-            return getattr(na, name)
-        raise
-
-#na = YTArrayHandler()
-#print na.zeros




diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/astro_objects/api.py
--- a/yt/astro_objects/api.py
+++ /dev/null
@@ -1,34 +0,0 @@
-"""
-API for yt.astro_objects
-
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: Columbia University
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2011 Matthew Turk.  All Rights Reserved.
-
-  This file is part of yt.
-
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-
-"""
-
-from .astrophysical_object import \
-    AstrophysicalObject, identification_method, correlation_method
-
-from .simulation_volume import \
-    SimulationVolume
-
-from .clumped_region import \
-    ClumpedRegion


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/astro_objects/astrophysical_object.py
--- a/yt/astro_objects/astrophysical_object.py
+++ /dev/null
@@ -1,90 +0,0 @@
-"""
-A base-class representing an astrophysical object
-
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: Columbia University
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2011 Matthew Turk.  All Rights Reserved.
-
-  This file is part of yt.
-
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
-
-astro_object_registry = {}
-
-class AstrophysicalObject(object):
-    # No _type_name
-    _skip_add = False
-
-    class __metaclass__(type):
-        def __init__(cls, name, b, d):
-            type.__init__(cls, name, b, d)
-            if hasattr(cls, "_type_name") and not cls._skip_add:
-                astro_object_registry[cls._type_name] = cls
-            cls.identification_methods = {}
-            cls.correlation_methods = {}
-
-    def _lookup_object(self, obj_name):
-        if obj_name not in astro_object_registry:
-            raise KeyError(obj_name)
-        return astro_object_registry[obj_name]
-
-    def correlate(self, other_collection, correlation_name):
-        pass
-
-    def __init__(self, data_source):
-        self.objects = {}
-        # We mandate that every object have a corresponding AMR3DData source
-        # affiliated with it.
-        self.data_source = data_source
-
-    def find(self, obj_name, identification_name, *args, **kwargs):
-        obj = self._lookup_object(obj_name)
-        if callable(identification_name):
-            identification_method = identification_name
-        else:
-            if identification_name not in obj.identification_methods:
-                raise KeyError(identification_name)
-            identification_method = \
-                obj.identification_methods[identification_name]
-        new_objs = identification_method(self, *args, **kwargs)
-        setattr(self, obj_name, new_objs)
-        self.objects[obj_name] = new_objs
-        return new_objs
-
-    def correlate(self, other_set, correlation_name, *args, **kwargs):
-        if callable(correlation_name):
-            correlation_method = correlation_name
-        else:
-            if correlation_name not in self.correlation_methods:
-                raise KeyError(correlation_name)
-            correlation_method = self.correlation_methods[correlation_name]
-        linked_objs = correlation_method(self, *args, **kwargs)
-        return linked_objs
-
-def correlation_method(obj_name, link_name):
-    def passthrough(func):
-        obj = astro_object_registry[obj_name]
-        obj.correlation_methods[link_name] = func
-        return func
-    return passthrough
-
-def identification_method(obj_name, id_name):
-    def passthrough(func):
-        obj = astro_object_registry[obj_name]
-        obj.identification_methods[id_name] = func
-        return func
-    return passthrough


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/astro_objects/clumped_region.py
--- a/yt/astro_objects/clumped_region.py
+++ /dev/null
@@ -1,39 +0,0 @@
-"""
-A base-class representing an astrophysical object
-
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: Columbia University
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2011 Matthew Turk.  All Rights Reserved.
-
-  This file is part of yt.
-
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
-
-from .astrophysical_object import \
-    AstrophysicalObject, identification_method, correlation_method
-    
-class ClumpedRegion(AstrophysicalObject):
-    _type_name = "clumped_region"
-    def __init__(self, data_source):
-        AstrophysicalObject.__init__(self, data_source)
-
- at identification_method("clumped_region", "level_set")
-def clumps(obj, field, min_val):
-    ds = obj.data_source
-    mi, ma = ds.quantities["Extrema"](field)[0]
-    cls = obj.data_source.extract_connected_sets(field, 1, min_val, ma)
-    return [ClumpedRegion(o) for o in cls[1][0]]


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/astro_objects/setup.py
--- a/yt/astro_objects/setup.py
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/usr/bin/env python
-import setuptools
-import os
-import sys
-import os.path
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('astro_objects', parent_package, top_path)
-    config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
-    return config


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/astro_objects/simulation_volume.py
--- a/yt/astro_objects/simulation_volume.py
+++ /dev/null
@@ -1,32 +0,0 @@
-"""
-An AstrophysicalObject that represents a simulation volume
-
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: Columbia University
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2011 Matthew Turk.  All Rights Reserved.
-
-  This file is part of yt.
-
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
-
-from .astrophysical_object import \
-    AstrophysicalObject, identification_method, correlation_method
-    
-class SimulationVolume(AstrophysicalObject):
-    _type_name = "simulation_volume"
-    def __init__(self, data_source):
-        AstrophysicalObject.__init__(self, data_source)


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -35,35 +35,6 @@
     output_type_registry, \
     EnzoRunDatabase
 
-def all_pfs(basedir='.', skip=None, max_depth=1, name_spec="*.hierarchy", **kwargs):
-    """
-    This function searchs a directory and its sub-directories, up to a
-    depth of *max_depth*, for parameter files.  It looks for the
-    *name_spec* and then instantiates an EnzoStaticOutput from
-    each. You can skip every *skip* parameter files, if *skip* is not
-    None; otherwise it will return all files.  All subsequent *kwargs*
-    are passed on to the EnzoStaticOutput constructor.
-    """
-    list_of_names = []
-    basedir = os.path.expanduser(basedir)
-    for i in range(max_depth):
-        bb = list('*' * i) + [name_spec]
-        list_of_names += glob.glob(os.path.join(basedir,*bb))
-    list_of_names.sort(key=lambda b: os.path.basename(b))
-    for fn in list_of_names[::skip]:
-        yield load(fn[:-10], **kwargs)
-
-def max_spheres(width, unit, **kwargs):
-    """
-    This calls :func:`~yt.convenience.all_pfs` and then for each parameter file
-    creates a :class:`~yt.data_objects.api.AMRSphereBase` for each one,
-    centered on the point of highest density, with radius *width* in units of
-    *unit*.
-    """
-    for pf in all_pfs(**kwargs):
-        v, c = pf.h.find_max("Density")
-        yield pf.h.sphere(c, width/pf[unit])
-
 def load(*args ,**kwargs):
     """
     This function attempts to determine the base data type of a filename or
@@ -76,22 +47,22 @@
         try:
             import Tkinter, tkFileDialog
         except ImportError:
-            return None
+            raise YTOutputNotIdentified(args, kwargs)
         root = Tkinter.Tk()
         filename = tkFileDialog.askopenfilename(parent=root,title='Choose a file')
         if filename != None:
             return load(filename)
         else:
-            return None
+            raise YTOutputNotIdentified(args, kwargs)
     candidates = []
     args = [os.path.expanduser(arg) if isinstance(arg, types.StringTypes)
             else arg for arg in args]
-    valid_file = [os.path.isfile(arg) if isinstance(arg, types.StringTypes) 
+    valid_file = [os.path.exists(arg) if isinstance(arg, types.StringTypes) 
             else False for arg in args]
     if not any(valid_file):
         mylog.error("None of the arguments provided to load() is a valid file")
         mylog.error("Please check that you have used a correct path")
-        return None
+        raise YTOutputNotIdentified(args, kwargs)
     for n, c in output_type_registry.items():
         if n is None: continue
         if c._is_valid(*args, **kwargs): candidates.append(n)
@@ -108,11 +79,11 @@
                and output_type_registry[n]._is_valid(fn):
                 return output_type_registry[n](fn)
         mylog.error("Couldn't figure out output type for %s", args[0])
-        return None
+        raise YTOutputNotIdentified(args, kwargs)
     mylog.error("Multiple output type candidates for %s:", args[0])
     for c in candidates:
         mylog.error("    Possible: %s", c)
-    return None
+    raise YTOutputNotIdentified(args, kwargs)
 
 def projload(pf, axis, weight_field = None):
     # This is something of a hack, so that we can just get back a projection
@@ -140,77 +111,3 @@
     f.close()
     return proj
 
-def _chunk(arrlike, chunksize = 800000):
-    total_size = arrlike.shape[0]
-    pbar = get_pbar("Transferring %s " % (arrlike.name), total_size)
-    start = 0; end = 0
-    bits = []
-    while start < total_size:
-        bits.append(arrlike[start:start+chunksize])
-        pbar.update(start)
-        start += chunksize
-    pbar.finish()
-    return na.concatenate(bits)
-
-def dapload(p, axis, weight_field = None):
-    r"""Load a projection dataset from a DAP server.
-
-    If you have projections stored externally on a DAP server, this function
-    can load them (transferring in chunks to avoid overloading) locally and
-    display them.
-
-    Parameters
-    ----------
-    p : string
-        URL for the dataset on the DAP server
-    axis : int
-        The axis of projection to load (0, 1, 2)
-    weight_field : string
-        The weight_field used in the projection
-
-    Returns
-    -------
-    projmock : ProjMock
-        This is a mockup of a projection that mostly fills the API.  It can be
-        used with `yt.visualization.image_panner.api.VariableMeshPanner`
-        objects.
-
-    See Also
-    --------
-    http://www.opendap.org/ and http://pydap.org/2.x/ . (Note that HDF5 is not
-    supported on PyDAP 3.x servers.)
-
-    Examples
-    --------
-
-    >>> p = "http://datasets-r-us.org/output_0013.h5"
-    >>> proj = dapload(p, 0, "Density")
-    >>> vmp = VariableMeshPanner(proj, (512, 512), "Density", ImageSaver(0))
-    >>> vmp.zoom(1.0)
-    """
-    class PFMock(dict):
-        domain_left_edge = na.zeros(3, dtype='float64')
-        domain_right_edge = na.ones(3, dtype='float64')
-    pf = PFMock()
-    class ProjMock(dict):
-        pass
-    import dap.client
-    f = dap.client.open(p)
-    b = f["Projections"]["%s" % (axis)]
-    wf = "weight_field_%s" % weight_field
-    if wf not in b: raise KeyError(wf)
-    fields = []
-    for k in b:
-        if k.name.startswith("weight_field"): continue
-        if k.name.endswith("_%s" % weight_field):
-            fields.append(k.name)
-    proj = ProjMock()
-    for f in ["px","py","pdx","pdy"]:
-        proj[f] = _chunk(b[f])
-    for f in fields:
-        new_name = f[:-(len(str(weight_field)) + 1)]
-        proj[new_name] = _chunk(b[f])
-    proj.axis = axis
-    proj.pf = pf
-    return proj
-


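A behavioural note on the convenience.py change above: load() now raises YTOutputNotIdentified instead of returning None when no output type can be identified, so callers that tested the return value against None need a try/except instead. A small sketch of the new calling pattern (the exception is assumed to live in yt.utilities.exceptions; its import is not part of this hunk):

from yt.mods import load
from yt.utilities.exceptions import YTOutputNotIdentified  # assumed location

try:
    pf = load("RedshiftOutput0005")
except YTOutputNotIdentified:
    # load() used to return None here; the failure is now an explicit exception.
    print "Could not identify an output at that path"
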
diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -56,6 +56,7 @@
     ParameterFileStore
 from yt.utilities.minimal_representation import \
     MinimalProjectionData, MinimalSliceData
+from yt.utilities.orientation import Orientation
 
 from .derived_quantities import DerivedQuantityCollection
 from .field_info_container import \
@@ -240,6 +241,8 @@
             pass
         elif isinstance(center, (types.ListType, types.TupleType, na.ndarray)):
             center = na.array(center)
+        elif center in ("c", "center"):
+            center = self.pf.domain_center
         elif center == ("max"): # is this dangerous for race conditions?
             center = self.pf.h.find_max("Density")[1]
         elif center.startswith("max_"):
@@ -493,7 +496,7 @@
         self._sorted = {}
 
     def get_data(self, fields=None, in_grids=False):
-        if self._grids == None:
+        if self._grids is None:
             self._get_list_of_grids()
         points = []
         if not fields:
@@ -1130,6 +1133,9 @@
     def _mrep(self):
         return MinimalSliceData(self)
 
+    def hub_upload(self):
+        self._mrep.upload()
+
 class AMRCuttingPlaneBase(AMR2DData):
     _plane = None
     _top_node = "/CuttingPlanes"
@@ -1137,7 +1143,7 @@
     _type_name = "cutting"
     _con_args = ('normal', 'center')
     def __init__(self, normal, center, fields = None, node_name = None,
-                 **kwargs):
+                 north_vector = None, **kwargs):
         """
         This is a data object corresponding to an oblique slice through the
         simulation domain.
@@ -1186,16 +1192,11 @@
         self.set_field_parameter('center',center)
         # Let's set up our plane equation
         # ax + by + cz + d = 0
-        self._norm_vec = normal/na.sqrt(na.dot(normal,normal))
+        self.orienter = Orientation(normal, north_vector = north_vector)
+        self._norm_vec = self.orienter.normal_vector
         self._d = -1.0 * na.dot(self._norm_vec, self.center)
-        # First we try all three, see which has the best result:
-        vecs = na.identity(3)
-        _t = na.cross(self._norm_vec, vecs).sum(axis=1)
-        ax = _t.argmax()
-        self._x_vec = na.cross(vecs[ax,:], self._norm_vec).ravel()
-        self._x_vec /= na.sqrt(na.dot(self._x_vec, self._x_vec))
-        self._y_vec = na.cross(self._norm_vec, self._x_vec).ravel()
-        self._y_vec /= na.sqrt(na.dot(self._y_vec, self._y_vec))
+        self._x_vec = self.orienter.unit_vectors[0]
+        self._y_vec = self.orienter.unit_vectors[1]
         self._rot_mat = na.array([self._x_vec,self._y_vec,self._norm_vec])
         self._inv_mat = na.linalg.pinv(self._rot_mat)
         self.set_field_parameter('cp_x_vec',self._x_vec)
@@ -1299,7 +1300,7 @@
             This can either be a floating point value, in the native domain
             units of the simulation, or a tuple of the (value, unit) style.
             This will be the width of the FRB.
-        height : height specifier
+        height : height specifier, optional
             This will be the height of the FRB, by default it is equal to width.
         resolution : int or tuple of ints
             The number of pixels on a side of the final FRB.
@@ -1640,6 +1641,9 @@
     def _mrep(self):
         return MinimalProjectionData(self)
 
+    def hub_upload(self):
+        self._mrep.upload()
+
     def _convert_field_name(self, field):
         if field == "weight_field": return "weight_field_%s" % self._weight
         if field in self._key_fields: return field
@@ -2502,7 +2506,18 @@
     def cut_region(self, field_cuts):
         """
         Return an InLineExtractedRegion, where the grid cells are cut on the
-        fly with a set of field_cuts.
+        fly with a set of field_cuts.  It is very useful for applying 
+        conditions to the fields in your data object.
+        
+        Examples
+        --------
+        To find the total mass of gas above 10^6 K in your volume:
+
+        >>> pf = load("RedshiftOutput0005")
+        >>> ad = pf.h.all_data()
+        >>> cr = ad.cut_region(["grid['Temperature'] > 1e6"])
+        >>> print cr.quantities["TotalQuantity"]("CellMassMsun")
+
         """
         return InLineExtractedRegionBase(self, field_cuts)
 
@@ -3251,6 +3266,40 @@
         pointI = na.where(k == True)
         return pointI
 
+class AMRMaxLevelCollection(AMR3DData):
+    _type_name = "grid_collection_max_level"
+    _con_args = ("center", "max_level")
+    def __init__(self, center, max_level, fields = None,
+                 pf = None, **kwargs):
+        """
+        A 3D data container holding every grid at or below *max_level*.  For
+        grids below *max_level*, cells covered by child grids are masked out;
+        at *max_level* itself, all cells are included.
+        """
+        AMR3DData.__init__(self, center, fields, pf, **kwargs)
+        self.max_level = max_level
+        self._refresh_data()
+
+    def _get_list_of_grids(self):
+        if self._grids is not None: return
+        gi = (self.pf.h.grid_levels <= self.max_level)[:,0]
+        self._grids = self.pf.h.grids[gi]
+
+    def _is_fully_enclosed(self, grid):
+        return True
+
+    @cache_mask
+    def _get_cut_mask(self, grid):
+        return na.ones(grid.ActiveDimensions, dtype='bool')
+
+    def _get_point_indices(self, grid, use_child_mask=True):
+        k = na.ones(grid.ActiveDimensions, dtype='bool')
+        if use_child_mask and grid.Level < self.max_level:
+            k[grid.child_indices] = False
+        pointI = na.where(k == True)
+        return pointI
+
+
 class AMRSphereBase(AMR3DData):
     """
     A sphere of points
@@ -3326,9 +3375,18 @@
             The resolution level data is uniformly gridded at
         left_edge : array_like
             The left edge of the region to be extracted
-        right_edge : array_like
+        dims : array_like
+            Number of cells along each axis of resulting covering_grid
+        right_edge : array_like, optional
             The right edge of the region to be extracted
-
+        fields : array_like, optional
+            A list of fields that you'd like pre-generated for your object
+
+        Examples
+        --------
+        >>> cube = pf.h.covering_grid(2, left_edge=[0.0, 0.0, 0.0],
+        ...                           right_edge=[1.0, 1.0, 1.0],
+        ...                           dims=[128, 128, 128])
         """
         AMR3DData.__init__(self, center=kwargs.pop("center", None),
                            fields=fields, pf=pf, **kwargs)
@@ -3464,7 +3522,8 @@
     @wraps(AMRCoveringGridBase.__init__)
     def __init__(self, *args, **kwargs):
         """A 3D region with all data extracted and interpolated to a
-        single, specified resolution.
+        single, specified resolution. (Identical to covering_grid,
+        except that it interpolates.)
 
         Smoothed covering grids start at level 0, interpolating to
         fill the region to level 1, replacing any cells actually
@@ -3477,9 +3536,18 @@
             The resolution level data is uniformly gridded at
         left_edge : array_like
             The left edge of the region to be extracted
-        right_edge : array_like
+        dims : array_like
+            Number of cells along each axis of resulting covering_grid
+        right_edge : array_like, optional
             The right edge of the region to be extracted
-
+        fields : array_like, optional
+            A list of fields that you'd like pre-generated for your object
+
+        Examples
+        --------
+        >>> cube = pf.h.smoothed_covering_grid(2, left_edge=[0.0, 0.0, 0.0],
+        ...                                    right_edge=[1.0, 1.0, 1.0],
+        ...                                    dims=[128, 128, 128])
         """
         self._base_dx = (
               (self.pf.domain_right_edge - self.pf.domain_left_edge) /


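Several of the data_containers.py additions above are small API conveniences: "c"/"center" is now accepted as a center specifier, oblique slices take an explicit north_vector (handled by the new Orientation helper rather than the old ad-hoc cross products), and grid_collection_max_level selects every grid at or below a chosen level. A rough usage sketch, with the dataset name and numbers purely illustrative:

from yt.mods import load

pf = load("RedshiftOutput0005")

# "c" (or "center") resolves to the domain center.
sp = pf.h.sphere("c", 0.1)

# Cutting planes accept a north_vector to pin the in-plane orientation.
cp = pf.h.cutting([0.1, 0.2, -0.9], "c", north_vector=[0.0, 0.0, 1.0])

# Every grid at or below level 2, with cells covered by finer grids masked out.
gc = pf.h.grid_collection_max_level("c", 2)
print gc.quantities["TotalQuantity"]("CellMassMsun")
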
diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/data_objects/hierarchy.py
--- a/yt/data_objects/hierarchy.py
+++ b/yt/data_objects/hierarchy.py
@@ -354,7 +354,7 @@
         #   1 = number of cells
         #   2 = blank
         desc = {'names': ['numgrids','numcells','level'],
-                'formats':['Int32']*3}
+                'formats':['Int64']*3}
         self.level_stats = blankRecordArray(desc, MAXLEVEL)
         self.level_stats['level'] = [i for i in range(MAXLEVEL)]
         self.level_stats['numgrids'] = [0 for i in range(MAXLEVEL)]


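Switching level_stats from Int32 to Int64 presumably guards the numcells counter against 32-bit overflow on large datasets; the arithmetic is easy to check:

import numpy as na

ncells = 2048 ** 3                      # a single 2048^3 root grid: 8,589,934,592 cells
print ncells > na.iinfo(na.int32).max   # True: int32 tops out at 2,147,483,647
print ncells < na.iinfo(na.int64).max   # True
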
diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -128,6 +128,9 @@
     def _mrep(self):
         return MinimalStaticOutput(self)
 
+    def hub_upload(self):
+        self._mrep.upload()
+
     @classmethod
     def _is_valid(cls, *args, **kwargs):
         return False


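With hub_upload() on StaticOutput here, and the matching methods added to slices and projections in the data_containers.py hunk above, pushing a minimal representation upstream becomes a one-liner. A hypothetical session (dataset name illustrative):

from yt.mods import load

pf = load("RedshiftOutput0005")
pf.hub_upload()            # uploads the MinimalStaticOutput representation

proj = pf.h.proj(0, "Density")
proj.hub_upload()          # likewise for MinimalProjectionData
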
diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -467,7 +467,7 @@
           units=r"\rm{s}^{-1}")
 
 def _Contours(field, data):
-    return na.ones(data["Density"].shape)*-1
+    return -na.ones_like(data["Ones"])
 add_field("Contours", validators=[ValidateSpatial(0)], take_log=False,
           display_field=False, function=_Contours)
 add_field("tempContours", function=_Contours,
@@ -483,20 +483,8 @@
     zv = data["z-velocity"] - bv[2]
     return xv, yv, zv
 
-def _SpecificAngularMomentum(field, data):
-    """
-    Calculate the angular velocity.  Returns a vector for each cell.
-    """
-    r_vec = obtain_rvec(data)
-    xv, yv, zv = obtain_velocities(data)
-    v_vec = na.array([xv,yv,zv], dtype='float64')
-    return na.cross(r_vec, v_vec, axis=0)
 def _convertSpecificAngularMomentum(data):
     return data.convert("cm")
-add_field("SpecificAngularMomentum",
-          function=_SpecificAngularMomentum,
-          convert_function=_convertSpecificAngularMomentum, vector_field=True,
-          units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter('center')])
 def _convertSpecificAngularMomentumKMSMPC(data):
     return data.convert("mpc")/1e5
 
@@ -518,21 +506,6 @@
               convert_function=_convertSpecificAngularMomentum,
               units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
 
-add_field("SpecificAngularMomentumKMSMPC",
-          function=_SpecificAngularMomentum,
-          convert_function=_convertSpecificAngularMomentumKMSMPC, vector_field=True,
-          units=r"\rm{km}\rm{Mpc}/\rm{s}", validators=[ValidateParameter('center')])
-def _AngularMomentum(field, data):
-    return data["CellMass"] * data["SpecificAngularMomentum"]
-add_field("AngularMomentum", function=_AngularMomentum,
-         units=r"\rm{g}\/\rm{cm}^2/\rm{s}", vector_field=True,
-         validators=[ValidateParameter('center')])
-def _AngularMomentumMSUNKMSMPC(field, data):
-    return data["CellMassMsun"] * data["SpecificAngularMomentumKMSMPC"]
-add_field("AngularMomentumMSUNKMSMPC", function=_AngularMomentum,
-          units=r"M_{\odot}\rm{km}\rm{Mpc}/\rm{s}", vector_field=True,
-         validators=[ValidateParameter('center')])
-
 def _AngularMomentumX(field, data):
     return data["CellMass"] * data["SpecificAngularMomentumX"]
 add_field("AngularMomentumX", function=_AngularMomentumX,
@@ -890,3 +863,376 @@
           units=r"\rm{s}^{-2}",
           convert_function=_convertVorticitySquared)
 
+def _gradPressureX(field, data):
+    # We need to set up stencils
+    if data.pf["HydroMethod"] == 2:
+        sl_left = slice(None,-2,None)
+        sl_right = slice(1,-1,None)
+        div_fac = 1.0
+    else:
+        sl_left = slice(None,-2,None)
+        sl_right = slice(2,None,None)
+        div_fac = 2.0
+    new_field = na.zeros(data["Pressure"].shape, dtype='float64')
+    ds = div_fac * data['dx'].flat[0]
+    new_field[1:-1,1:-1,1:-1]  = data["Pressure"][sl_right,1:-1,1:-1]/ds
+    new_field[1:-1,1:-1,1:-1] -= data["Pressure"][sl_left ,1:-1,1:-1]/ds
+    return new_field
+def _gradPressureY(field, data):
+    # We need to set up stencils
+    if data.pf["HydroMethod"] == 2:
+        sl_left = slice(None,-2,None)
+        sl_right = slice(1,-1,None)
+        div_fac = 1.0
+    else:
+        sl_left = slice(None,-2,None)
+        sl_right = slice(2,None,None)
+        div_fac = 2.0
+    new_field = na.zeros(data["Pressure"].shape, dtype='float64')
+    ds = div_fac * data['dy'].flat[0]
+    new_field[1:-1,1:-1,1:-1]  = data["Pressure"][1:-1,sl_right,1:-1]/ds
+    new_field[1:-1,1:-1,1:-1] -= data["Pressure"][1:-1,sl_left ,1:-1]/ds
+    return new_field
+def _gradPressureZ(field, data):
+    # We need to set up stencils
+    if data.pf["HydroMethod"] == 2:
+        sl_left = slice(None,-2,None)
+        sl_right = slice(1,-1,None)
+        div_fac = 1.0
+    else:
+        sl_left = slice(None,-2,None)
+        sl_right = slice(2,None,None)
+        div_fac = 2.0
+    new_field = na.zeros(data["Pressure"].shape, dtype='float64')
+    ds = div_fac * data['dz'].flat[0]
+    new_field[1:-1,1:-1,1:-1]  = data["Pressure"][1:-1,1:-1,sl_right]/ds
+    new_field[1:-1,1:-1,1:-1] -= data["Pressure"][1:-1,1:-1,sl_left ]/ds
+    return new_field
+def _convertgradPressure(data):
+    return 1.0/data.convert("cm")
+for ax in 'XYZ':
+    n = "gradPressure%s" % ax
+    add_field(n, function=eval("_%s" % n),
+              convert_function=_convertgradPressure,
+              validators=[ValidateSpatial(1, ["Pressure"])],
+              units=r"\rm{dyne}/\rm{cm}^{3}")
+
+def _gradPressureMagnitude(field, data):
+    return na.sqrt(data["gradPressureX"]**2 +
+                   data["gradPressureY"]**2 +
+                   data["gradPressureZ"]**2)
+add_field("gradPressureMagnitude", function=_gradPressureMagnitude,
+          validators=[ValidateSpatial(1, ["Pressure"])],
+          units=r"\rm{dyne}/\rm{cm}^{3}")
+
+def _gradDensityX(field, data):
+    # We need to set up stencils
+    if data.pf["HydroMethod"] == 2:
+        sl_left = slice(None,-2,None)
+        sl_right = slice(1,-1,None)
+        div_fac = 1.0
+    else:
+        sl_left = slice(None,-2,None)
+        sl_right = slice(2,None,None)
+        div_fac = 2.0
+    new_field = na.zeros(data["Density"].shape, dtype='float64')
+    ds = div_fac * data['dx'].flat[0]
+    new_field[1:-1,1:-1,1:-1]  = data["Density"][sl_right,1:-1,1:-1]/ds
+    new_field[1:-1,1:-1,1:-1] -= data["Density"][sl_left ,1:-1,1:-1]/ds
+    return new_field
+def _gradDensityY(field, data):
+    # We need to set up stencils
+    if data.pf["HydroMethod"] == 2:
+        sl_left = slice(None,-2,None)
+        sl_right = slice(1,-1,None)
+        div_fac = 1.0
+    else:
+        sl_left = slice(None,-2,None)
+        sl_right = slice(2,None,None)
+        div_fac = 2.0
+    new_field = na.zeros(data["Density"].shape, dtype='float64')
+    ds = div_fac * data['dy'].flat[0]
+    new_field[1:-1,1:-1,1:-1]  = data["Density"][1:-1,sl_right,1:-1]/ds
+    new_field[1:-1,1:-1,1:-1] -= data["Density"][1:-1,sl_left ,1:-1]/ds
+    return new_field
+def _gradDensityZ(field, data):
+    # We need to set up stencils
+    if data.pf["HydroMethod"] == 2:
+        sl_left = slice(None,-2,None)
+        sl_right = slice(1,-1,None)
+        div_fac = 1.0
+    else:
+        sl_left = slice(None,-2,None)
+        sl_right = slice(2,None,None)
+        div_fac = 2.0
+    new_field = na.zeros(data["Density"].shape, dtype='float64')
+    ds = div_fac * data['dz'].flat[0]
+    new_field[1:-1,1:-1,1:-1]  = data["Density"][1:-1,1:-1,sl_right]/ds
+    new_field[1:-1,1:-1,1:-1] -= data["Density"][1:-1,1:-1,sl_left ]/ds
+    return new_field
+def _convertgradDensity(data):
+    return 1.0/data.convert("cm")
+for ax in 'XYZ':
+    n = "gradDensity%s" % ax
+    add_field(n, function=eval("_%s" % n),
+              convert_function=_convertgradDensity,
+              validators=[ValidateSpatial(1, ["Density"])],
+              units=r"\rm{g}/\rm{cm}^{4}")
+
+def _gradDensityMagnitude(field, data):
+    return na.sqrt(data["gradDensityX"]**2 +
+                   data["gradDensityY"]**2 +
+                   data["gradDensityZ"]**2)
+add_field("gradDensityMagnitude", function=_gradDensityMagnitude,
+          validators=[ValidateSpatial(1, ["Density"])],
+          units=r"\rm{g}/\rm{cm}^{4}")
+
+def _BaroclinicVorticityX(field, data):
+    rho2 = data["Density"].astype('float64')**2
+    return (data["gradPressureY"] * data["gradDensityZ"] -
+            data["gradPressureZ"] * data["gradDensityY"]) / rho2
+def _BaroclinicVorticityY(field, data):
+    rho2 = data["Density"].astype('float64')**2
+    return (data["gradPressureZ"] * data["gradDensityX"] -
+            data["gradPressureX"] * data["gradDensityZ"]) / rho2
+def _BaroclinicVorticityZ(field, data):
+    rho2 = data["Density"].astype('float64')**2
+    return (data["gradPressureX"] * data["gradDensityY"] -
+            data["gradPressureY"] * data["gradDensityX"]) / rho2
+for ax in 'XYZ':
+    n = "BaroclinicVorticity%s" % ax
+    add_field(n, function=eval("_%s" % n),
+          validators=[ValidateSpatial(1, ["Density", "Pressure"])],
+          units=r"\rm{s}^{-1}")
+
+def _BaroclinicVorticityMagnitude(field, data):
+    return na.sqrt(data["BaroclinicVorticityX"]**2 +
+                   data["BaroclinicVorticityY"]**2 +
+                   data["BaroclinicVorticityZ"]**2)
+add_field("BaroclinicVorticityMagnitude",
+          function=_BaroclinicVorticityMagnitude,
+          validators=[ValidateSpatial(1, ["Density", "Pressure"])],
+          units=r"\rm{s}^{-1}")
+
+def _VorticityX(field, data):
+    # We need to set up stencils
+    if data.pf["HydroMethod"] == 2:
+        sl_left = slice(None,-2,None)
+        sl_right = slice(1,-1,None)
+        div_fac = 1.0
+    else:
+        sl_left = slice(None,-2,None)
+        sl_right = slice(2,None,None)
+        div_fac = 2.0
+    new_field = na.zeros(data["z-velocity"].shape, dtype='float64')
+    new_field[1:-1,1:-1,1:-1] = (data["z-velocity"][1:-1,sl_right,1:-1] -
+                                 data["z-velocity"][1:-1,sl_left,1:-1]) \
+                                 / (div_fac*data["dy"].flat[0])
+    new_field[1:-1,1:-1,1:-1] -= (data["y-velocity"][1:-1,1:-1,sl_right] -
+                                  data["y-velocity"][1:-1,1:-1,sl_left]) \
+                                  / (div_fac*data["dz"].flat[0])
+    return new_field
+def _VorticityY(field, data):
+    # We need to set up stencils
+    if data.pf["HydroMethod"] == 2:
+        sl_left = slice(None,-2,None)
+        sl_right = slice(1,-1,None)
+        div_fac = 1.0
+    else:
+        sl_left = slice(None,-2,None)
+        sl_right = slice(2,None,None)
+        div_fac = 2.0
+    new_field = na.zeros(data["z-velocity"].shape, dtype='float64')
+    new_field[1:-1,1:-1,1:-1] = (data["x-velocity"][1:-1,1:-1,sl_right] -
+                                 data["x-velocity"][1:-1,1:-1,sl_left]) \
+                                 / (div_fac*data["dz"].flat[0])
+    new_field[1:-1,1:-1,1:-1] -= (data["z-velocity"][sl_right,1:-1,1:-1] -
+                                  data["z-velocity"][sl_left,1:-1,1:-1]) \
+                                  / (div_fac*data["dx"].flat[0])
+    return new_field
+def _VorticityZ(field, data):
+    # We need to set up stencils
+    if data.pf["HydroMethod"] == 2:
+        sl_left = slice(None,-2,None)
+        sl_right = slice(1,-1,None)
+        div_fac = 1.0
+    else:
+        sl_left = slice(None,-2,None)
+        sl_right = slice(2,None,None)
+        div_fac = 2.0
+    new_field = na.zeros(data["x-velocity"].shape, dtype='float64')
+    new_field[1:-1,1:-1,1:-1] = (data["y-velocity"][sl_right,1:-1,1:-1] -
+                                 data["y-velocity"][sl_left,1:-1,1:-1]) \
+                                 / (div_fac*data["dx"].flat[0])
+    new_field[1:-1,1:-1,1:-1] -= (data["x-velocity"][1:-1,sl_right,1:-1] -
+                                  data["x-velocity"][1:-1,sl_left,1:-1]) \
+                                  / (div_fac*data["dy"].flat[0])
+    return new_field
+def _convertVorticity(data):
+    return 1.0/data.convert("cm")
+for ax in 'XYZ':
+    n = "Vorticity%s" % ax
+    add_field(n, function=eval("_%s" % n),
+              convert_function=_convertVorticity,
+              validators=[ValidateSpatial(1, 
+                          ["x-velocity", "y-velocity", "z-velocity"])],
+              units=r"\rm{s}^{-1}")
+
+def _VorticityMagnitude(field, data):
+    return na.sqrt(data["VorticityX"]**2 +
+                   data["VorticityY"]**2 +
+                   data["VorticityZ"]**2)
+add_field("VorticityMagnitude", function=_VorticityMagnitude,
+          validators=[ValidateSpatial(1, 
+                      ["x-velocity", "y-velocity", "z-velocity"])],
+          units=r"\rm{s}^{-1}")
+
+def _VorticityStretchingX(field, data):
+    return data["DivV"] * data["VorticityX"]
+def _VorticityStretchingY(field, data):
+    return data["DivV"] * data["VorticityY"]
+def _VorticityStretchingZ(field, data):
+    return data["DivV"] * data["VorticityZ"]
+for ax in 'XYZ':
+    n = "VorticityStretching%s" % ax
+    add_field(n, function=eval("_%s" % n),
+              validators=[ValidateSpatial(0)])
+def _VorticityStretchingMagnitude(field, data):
+    return na.sqrt(data["VorticityStretchingX"]**2 +
+                   data["VorticityStretchingY"]**2 +
+                   data["VorticityStretchingZ"]**2)
+add_field("VorticityStretchingMagnitude", 
+          function=_VorticityStretchingMagnitude,
+          validators=[ValidateSpatial(1, 
+                      ["x-velocity", "y-velocity", "z-velocity"])],
+          units=r"\rm{s}^{-1}")
+
+def _VorticityGrowthX(field, data):
+    return -data["VorticityStretchingX"] - data["BaroclinicVorticityX"]
+def _VorticityGrowthY(field, data):
+    return -data["VorticityStretchingY"] - data["BaroclinicVorticityY"]
+def _VorticityGrowthZ(field, data):
+    return -data["VorticityStretchingZ"] - data["BaroclinicVorticityZ"]
+for ax in 'XYZ':
+    n = "VorticityGrowth%s" % ax
+    add_field(n, function=eval("_%s" % n),
+              validators=[ValidateSpatial(1, 
+                          ["x-velocity", "y-velocity", "z-velocity"])],
+              units=r"\rm{s}^{-2}")
+def _VorticityGrowthMagnitude(field, data):
+    result = na.sqrt(data["VorticityGrowthX"]**2 +
+                     data["VorticityGrowthY"]**2 +
+                     data["VorticityGrowthZ"]**2)
+    dot = na.zeros(result.shape)
+    for ax in "XYZ":
+        dot += data["Vorticity%s" % ax] * data["VorticityGrowth%s" % ax]
+    result = na.sign(dot) * result
+    return result
+add_field("VorticityGrowthMagnitude", function=_VorticityGrowthMagnitude,
+          validators=[ValidateSpatial(1, 
+                      ["x-velocity", "y-velocity", "z-velocity"])],
+          units=r"\rm{s}^{-1}",
+          take_log=False)
+def _VorticityGrowthMagnitudeABS(field, data):
+    return na.sqrt(data["VorticityGrowthX"]**2 +
+                   data["VorticityGrowthY"]**2 +
+                   data["VorticityGrowthZ"]**2)
+add_field("VorticityGrowthMagnitudeABS", function=_VorticityGrowthMagnitudeABS,
+          validators=[ValidateSpatial(1, 
+                      ["x-velocity", "y-velocity", "z-velocity"])],
+          units=r"\rm{s}^{-1}")
+
+def _VorticityGrowthTimescale(field, data):
+    domegax_dt = data["VorticityX"] / data["VorticityGrowthX"]
+    domegay_dt = data["VorticityY"] / data["VorticityGrowthY"]
+    domegaz_dt = data["VorticityZ"] / data["VorticityGrowthZ"]
+    return na.sqrt(domegax_dt**2 + domegay_dt**2 + domegaz_dt**2)
+add_field("VorticityGrowthTimescale", function=_VorticityGrowthTimescale,
+          validators=[ValidateSpatial(1, 
+                      ["x-velocity", "y-velocity", "z-velocity"])],
+          units=r"\rm{s}")
+
+########################################################################
+# With radiation pressure
+########################################################################
+
+def _VorticityRadPressureX(field, data):
+    rho = data["Density"].astype('float64')
+    return (data["RadAccel2"] * data["gradDensityZ"] -
+            data["RadAccel3"] * data["gradDensityY"]) / rho
+def _VorticityRadPressureY(field, data):
+    rho = data["Density"].astype('float64')
+    return (data["RadAccel3"] * data["gradDensityX"] -
+            data["RadAccel1"] * data["gradDensityZ"]) / rho
+def _VorticityRadPressureZ(field, data):
+    rho = data["Density"].astype('float64')
+    return (data["RadAccel1"] * data["gradDensityY"] -
+            data["RadAccel2"] * data["gradDensityX"]) / rho
+def _convertRadAccel(data):
+    return data.convert("x-velocity")/data.convert("Time")
+for ax in 'XYZ':
+    n = "VorticityRadPressure%s" % ax
+    add_field(n, function=eval("_%s" % n),
+              convert_function=_convertRadAccel,
+              validators=[ValidateSpatial(1, 
+                   ["Density", "RadAccel1", "RadAccel2", "RadAccel3"])],
+              units=r"\rm{s}^{-1}")
+
+def _VorticityRadPressureMagnitude(field, data):
+    return na.sqrt(data["VorticityRadPressureX"]**2 +
+                   data["VorticityRadPressureY"]**2 +
+                   data["VorticityRadPressureZ"]**2)
+add_field("VorticityRadPressureMagnitude",
+          function=_VorticityRadPressureMagnitude,
+          validators=[ValidateSpatial(1, 
+                      ["Density", "RadAccel1", "RadAccel2", "RadAccel3"])],
+          units=r"\rm{s}^{-1}")
+
+def _VorticityRPGrowthX(field, data):
+    return -data["VorticityStretchingX"] - data["BaroclinicVorticityX"] \
+           -data["VorticityRadPressureX"]
+def _VorticityRPGrowthY(field, data):
+    return -data["VorticityStretchingY"] - data["BaroclinicVorticityY"] \
+           -data["VorticityRadPressureY"]
+def _VorticityRPGrowthZ(field, data):
+    return -data["VorticityStretchingZ"] - data["BaroclinicVorticityZ"] \
+           -data["VorticityRadPressureZ"]
+for ax in 'XYZ':
+    n = "VorticityRPGrowth%s" % ax
+    add_field(n, function=eval("_%s" % n),
+              validators=[ValidateSpatial(1, 
+                       ["Density", "RadAccel1", "RadAccel2", "RadAccel3"])],
+              units=r"\rm{s}^{-1}")
+def _VorticityRPGrowthMagnitude(field, data):
+    result = na.sqrt(data["VorticityRPGrowthX"]**2 +
+                     data["VorticityRPGrowthY"]**2 +
+                     data["VorticityRPGrowthZ"]**2)
+    dot = na.zeros(result.shape)
+    for ax in "XYZ":
+        dot += data["Vorticity%s" % ax] * data["VorticityGrowth%s" % ax]
+    result = na.sign(dot) * result
+    return result
+add_field("VorticityRPGrowthMagnitude", function=_VorticityGrowthMagnitude,
+          validators=[ValidateSpatial(1, 
+                      ["Density", "RadAccel1", "RadAccel2", "RadAccel3"])],
+          units=r"\rm{s}^{-1}",
+          take_log=False)
+def _VorticityRPGrowthMagnitudeABS(field, data):
+    return na.sqrt(data["VorticityRPGrowthX"]**2 +
+                   data["VorticityRPGrowthY"]**2 +
+                   data["VorticityRPGrowthZ"]**2)
+add_field("VorticityRPGrowthMagnitudeABS", 
+          function=_VorticityRPGrowthMagnitudeABS,
+          validators=[ValidateSpatial(1, 
+                      ["Density", "RadAccel1", "RadAccel2", "RadAccel3"])],
+          units=r"\rm{s}^{-1}")
+
+def _VorticityRPGrowthTimescale(field, data):
+    domegax_dt = data["VorticityX"] / data["VorticityRPGrowthX"]
+    domegay_dt = data["VorticityY"] / data["VorticityRPGrowthY"]
+    domegaz_dt = data["VorticityZ"] / data["VorticityRPGrowthZ"]
+    return na.sqrt(domegax_dt**2 + domegay_dt**2 + domegaz_dt**2)
+add_field("VorticityRPGrowthTimescale", function=_VorticityRPGrowthTimescale,
+          validators=[ValidateSpatial(1, 
+                      ["Density", "RadAccel1", "RadAccel2", "RadAccel3"])],
+          units=r"\rm{s}^{-1}")


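All of the new gradient and vorticity fields above share one stencil: a one-zone finite difference that is one-sided (div_fac = 1) for Zeus-type runs (HydroMethod == 2) and centered (div_fac = 2) otherwise, with the outermost zones left at zero, which is why the fields carry ValidateSpatial(1) validators requesting one ghost zone. A standalone one-dimensional sketch of the centered case:

import numpy as na

def centered_gradient_1d(p, dx):
    # Interior zones get (p[i+1] - p[i-1]) / (2 dx); the edge zones stay zero,
    # mirroring the sl_left/sl_right slicing used by the field functions above.
    grad = na.zeros_like(p)
    sl_left = slice(None, -2)
    sl_right = slice(2, None)
    grad[1:-1] = (p[sl_right] - p[sl_left]) / (2.0 * dx)
    return grad

p = na.linspace(0.0, 9.0, 10)            # a linear profile with unit slope
print centered_gradient_1d(p, dx=1.0)    # interior entries are all 1.0
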
diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -30,8 +30,6 @@
 import os
 import struct
 
-import pdb
-
 from yt.funcs import *
 from yt.data_objects.grid_patch import \
       AMRGridPatch
@@ -56,6 +54,21 @@
 
 from yt.utilities.physical_constants import \
     mass_hydrogen_cgs
+    
+from yt.frontends.art.definitions import art_particle_field_names
+
+from yt.frontends.art.io import _read_child_mask_level
+from yt.frontends.art.io import read_particles
+from yt.frontends.art.io import read_stars
+from yt.frontends.art.io import _count_art_octs
+from yt.frontends.art.io import _read_art_level_info
+from yt.frontends.art.io import _read_art_child
+from yt.frontends.art.io import _skip_record
+from yt.frontends.art.io import _read_record
+from yt.frontends.art.io import _read_frecord
+from yt.frontends.art.io import _read_record_size
+from yt.frontends.art.io import _read_struct
+from yt.frontends.art.io import b2t
 
 def num_deep_inc(f):
     def wrap(self, *args, **kwargs):
@@ -68,14 +81,21 @@
 class ARTGrid(AMRGridPatch):
     _id_offset = 0
 
-    def __init__(self, id, hierarchy, level, locations, start_index):
+    def __init__(self, id, hierarchy, level, locations, props,child_mask=None):
         AMRGridPatch.__init__(self, id, filename = hierarchy.hierarchy_filename,
                               hierarchy = hierarchy)
+        start_index = props[0]
         self.Level = level
         self.Parent = []
         self.Children = []
         self.locations = locations
         self.start_index = start_index.copy()
+        
+        self.LeftEdge = props[0]
+        self.RightEdge = props[1]
+        self.ActiveDimensions = props[2] 
+        #if child_mask is not None:
+        #    self._set_child_mask(child_mask)
 
     def _setup_dx(self):
         # So first we figure out what the index is.  We don't assume
@@ -118,92 +138,58 @@
     def __init__(self, pf, data_style='art'):
         self.data_style = data_style
         self.parameter_file = weakref.proxy(pf)
-        # for now, the hierarchy file is the parameter file!
+        #for now, the hierarchy file is the parameter file!
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.directory = os.path.dirname(self.hierarchy_filename)
-
         self.float_type = na.float64
         AMRHierarchy.__init__(self,pf,data_style)
-
+        self._setup_field_list()
+        
     def _initialize_data_storage(self):
         pass
 
     def _detect_fields(self):
         # This will need to be generalized to be used elsewhere.
         self.field_list = [ 'Density','TotalEnergy',
-                            'x-momentum','y-momentum','z-momentum',
-                            'Pressure','Gamma','GasEnergy',
-                            'Metal_DensitySNII', 'Metal_DensitySNIa',
-                            'Potential_New','Potential_Old']
-    
+             'XMomentumDensity','YMomentumDensity','ZMomentumDensity',
+             'Pressure','Gamma','GasEnergy',
+             'MetalDensitySNII', 'MetalDensitySNIa',
+             'PotentialNew','PotentialOld']
+        self.field_list += art_particle_field_names
+
     def _setup_classes(self):
         dd = self._get_data_reader_dict()
         AMRHierarchy._setup_classes(self, dd)
         self.object_types.sort()
 
     def _count_grids(self):
-        # We have to do all the patch-coalescing here.
-        #level_info is used by the IO so promoting it to the static
-        # output class
-        #self.pf.level_info = [self.pf.ncell] # skip root grid for now
-        #leve_info = []
-        # amr_utils.count_art_octs(
-        #         self.pf.parameter_filename, self.pf.child_grid_offset,
-        #         self.pf.min_level, self.pf.max_level, self.pf.nhydro_vars,
-        #         self.pf.level_info)
+        LEVEL_OF_EDGE = 7
+        MAX_EDGE = (2 << (LEVEL_OF_EDGE- 1))
+        
+        min_eff = 0.30
+        
+        vol_max = 128**3
         
         f = open(self.pf.parameter_filename,'rb')
-        self.pf.nhydro_vars, self.pf.level_info = _count_art_octs(f, 
+        
+        
+        (self.pf.nhydro_vars, self.pf.level_info,
+        self.pf.level_oct_offsets, 
+        self.pf.level_child_offsets) = \
+                         _count_art_octs(f, 
                           self.pf.child_grid_offset,
                           self.pf.min_level, self.pf.max_level)
         self.pf.level_info[0]=self.pf.ncell
-        f.close()
-        self.pf.level_info = na.array(self.pf.level_info)
-        num_ogrids = sum(self.pf.level_info) + self.pf.iOctFree
-        print 'found %i oct grids'%num_ogrids
-        num_ogrids *=7
-        print 'instantiating... %i grids'%num_ogrids
-        ogrid_left_indices = na.zeros((num_ogrids,3), dtype='int64') - 999
-        ogrid_levels = na.zeros(num_ogrids, dtype='int64')
-        ogrid_file_locations = na.zeros((num_ogrids,6), dtype='int64')
-        
-        #don't need parents?
-        #ogrid_parents = na.zeros(num_ogrids, dtype="int64")
-        
-        #don't need masks?
-        #ochild_masks = na.zeros((num_ogrids, 8), dtype='int64').ravel()
-        
-        self.pf.level_offsets = amr_utils.read_art_tree(
-                                self.pf.parameter_filename, 
-                                self.pf.child_grid_offset,
-                                self.pf.min_level, self.pf.max_level,
-                                ogrid_left_indices, ogrid_levels,
-                                ogrid_file_locations)
-                                #ochild_masks,
-                                #ogrid_parents, 
-                                
+        self.pf.level_info = na.array(self.pf.level_info)        
+        self.pf.level_offsets = self.pf.level_child_offsets
         self.pf.level_offsets = na.array(self.pf.level_offsets, dtype='int64')
         self.pf.level_offsets[0] = self.pf.root_grid_offset
-        #ochild_masks.reshape((num_ogrids, 8), order="F")
-        ogrid_levels[ogrid_left_indices[:,0] == -999] = -1
-        # This bit of code comes from Chris, and I'm still not sure I have a
-        # handle on what it does.
-        final_indices =  ogrid_left_indices[na.where(ogrid_levels==self.pf.max_level)[0]]
-        divisible=[na.all((final_indices%2**(level))==0) 
-            for level in xrange(self.pf.max_level*2)]
-        root_level = self.pf.max_level+na.where(na.logical_not(divisible))[0][0] 
-        ogrid_dimension = na.zeros(final_indices.shape,dtype='int')+2
-        ogrid_left_indices = ogrid_left_indices/2**(root_level - ogrid_levels[:,None] - 1) - 1
-
-        # Now we can rescale
-        # root_psg = _ramses_reader.ProtoSubgrid(
-        #                 na.zeros(3, dtype='int64'), # left index of PSG
-        #                 self.pf.domain_dimensions, # dim of PSG
-        #                 na.zeros((1,3), dtype='int64'), # left edges of grids
-        #                 self.pf.domain_dimensions[None,:], # right edges of grids
-        #                 self.pf.domain_dimensions[None,:], # dims of grids
-        #                 na.zeros((1,6), dtype='int64') # empty
-        #                 )
+        
+        self.pf.level_art_child_masks = {}
+        cm = self.pf.root_iOctCh>0
+        cm_shape = (1,)+cm.shape 
+        self.pf.level_art_child_masks[0] = cm.reshape(cm_shape).astype('uint8')        
+        del cm
         
         root_psg = _ramses_reader.ProtoSubgrid(
                         na.zeros(3, dtype='int64'), # left index of PSG
@@ -217,198 +203,315 @@
             if self.pf.level_info[level] == 0:
                 self.proto_grids.append([])
                 continue
-            ggi = (ogrid_levels == level).ravel()
-            mylog.info("Re-gridding level %s: %s octree grids", level, ggi.sum())
-            nd = self.pf.domain_dimensions * 2**level
-            dims = na.ones((ggi.sum(), 3), dtype='int64') * 2
-            fl = ogrid_file_locations[ggi,:]
-            # Now our initial protosubgrid
-            #if level == 6: raise RuntimeError
-            # We want grids that cover no more than MAX_EDGE cells in every direction
-            MAX_EDGE = 128
             psgs = []
+            effs,sizes = [], []
+
+            if level > self.pf.limit_level : continue
+            
             #refers to the left index for the art octgrid
-            left_index = ogrid_left_indices[ggi,:]
-            right_index = left_index + 2
-            #Since we are re-gridding these octs on larger meshes
-            #each sub grid has length MAX_EDGE, and so get the LE of
-            #grids fit inside the domain
-            # nd is the dimensions of the domain at this level
-            lefts = [na.mgrid[0:nd[i]:MAX_EDGE] for i in range(3)]
-            #lefts = zip(*[l.ravel() for l in lefts])
-            pbar = get_pbar("Re-gridding ", lefts[0].size)
-            min_ind = na.min(left_index, axis=0)
-            max_ind = na.max(right_index, axis=0)
+            left_index, fl, nocts = _read_art_level_info(f, self.pf.level_oct_offsets,level)
+            #left_index_gridpatch = left_index >> LEVEL_OF_EDGE
             
-            #iterate over the ith dimension of the yt grids
-            for i,dli in enumerate(lefts[0]):
-                pbar.update(i)
+            #read in the child masks for this level and save them
+            idc, art_child_mask = _read_child_mask_level(f, self.pf.level_child_offsets,
+                level,nocts*8,nhydro_vars=self.pf.nhydro_vars)
+            art_child_mask = art_child_mask.reshape((nocts,2,2,2))
+            self.pf.level_art_child_masks[level]=art_child_mask
+            #child_mask is zero where child grids exist and
+            #thus where higher resolution data is available
+            
+            
+            #compute the hilbert indices up to a certain level
+            #the indices will associate an oct grid to the nearest
+            #hilbert index?
+            base_level = int( na.log10(self.pf.domain_dimensions.max()) /
+                              na.log10(2))
+            hilbert_indices = _ramses_reader.get_hilbert_indices(
+                                    level + base_level, left_index)
+            #print base_level, hilbert_indices.max(),
+            hilbert_indices = hilbert_indices >> base_level + LEVEL_OF_EDGE
+            #print hilbert_indices.max()
+            
+            # Strictly speaking, we don't care about the index of any
+            # individual oct at this point.  So we can then split them up.
+            unique_indices = na.unique(hilbert_indices)
+            mylog.info("Level % 2i has % 10i unique indices for %0.3e octs",
+                        level, unique_indices.size, hilbert_indices.size)
+            
+            #use the hilbert indices to order oct grids so that consecutive
+            #items on a list are spatially near each other
+            #this is useful because we will define grid patches over these
+            #octs, which are more efficient if the octs are spatially close
+            
+            #split into a list of lists, with each domain containing
+            #a list of sub octgrid left indices and an index
+            #referring to the domain on which they live
+            pbar = get_pbar("Calc Hilbert Indices ",1)
+            locs, lefts = _ramses_reader.get_array_indices_lists(
+                        hilbert_indices, unique_indices, left_index, fl)
+            pbar.finish()
+            
+            #iterate over the domains    
+            step=0
+            pbar = get_pbar("Re-gridding  Level %i "%level, len(locs))
+            psg_eff = []
+            for ddleft_index, ddfl in zip(lefts, locs):
+                #iterate over just the unique octs
+                #why would we ever have non-unique octs?
+                #perhaps the hilbert ordering may visit the same
+                #oct multiple times - review only unique octs 
+                #for idomain in na.unique(ddfl[:,1]):
+                #dom_ind = ddfl[:,1] == idomain
+                #dleft_index = ddleft_index[dom_ind,:]
+                #dfl = ddfl[dom_ind,:]
                 
-                #skip this grid if there are no art grids inside
-                #of the zeroeth dimension
-                if min_ind[0] > dli + nd[0]: continue
-                if max_ind[0] < dli: continue
+                dleft_index = ddleft_index
+                dfl = ddfl
+                initial_left = na.min(dleft_index, axis=0)
+                idims = (na.max(dleft_index, axis=0) - initial_left).ravel()+2
+                #this creates a grid patch that does not necessarily cover the
+                #whole level, but together with the other patches covers all
+                #the regions with octs. This object automatically shrinks its
+                #size to barely encompass the octs inside of it.
+                psg = _ramses_reader.ProtoSubgrid(initial_left, idims,
+                                dleft_index, dfl)
+                if psg.efficiency <= 0: continue
                 
-                # span of the current domain limited to max_edge
-                idim = min(nd[0] - dli, MAX_EDGE)
-
-                #gdi finds all of the art octs grids inside the 
-                #ith dimension of our current grid
-                gdi = ((dli  <= right_index[:,0])
-                     & (dli + idim >= left_index[:,0]))
-                     
-
-                #if none of our art octs fit inside, skip                    
-                if not na.any(gdi): continue
+                #Because grid patches may still be mostly empty, with octs
+                #that only partially fill the grid, it may be more efficient
+                #to split large patches into smaller patches. We split
+                #if less than 10% of a patch's volume is covered with octs.
+                if idims.prod() > vol_max or psg.efficiency < min_eff:
+                    psg_split = _ramses_reader.recursive_patch_splitting(
+                        psg, idims, initial_left, 
+                        dleft_index, dfl,min_eff=min_eff,use_center=True,
+                        split_on_vol=vol_max)
+                    
+                    psgs.extend(psg_split)
+                    psg_eff += [x.efficiency for x in psg_split] 
+                else:
+                    psgs.append(psg)
+                    psg_eff += [psg.efficiency,]
                 
-                #iterate over the jth dimension of the yt grids
-                for dlj in lefts[1]:
-                    
-                    #this is the same process as in the previous dimension
-                    #find art octs inside this grid's jth dimension, 
-                    #skip if there are none
-                    if min_ind[1] > dlj + nd[1]: continue
-                    if max_ind[1] < dlj: continue
-                    idim = min(nd[1] - dlj, MAX_EDGE)
-                    gdj = ((dlj  <= right_index[:,1])
-                         & (dlj + idim >= left_index[:,1])
-                         & (gdi))
-                    if not na.any(gdj): continue
-                    
-                    #Same story: iterate over kth dimension grids
-                    for dlk in lefts[2]:
-                        if min_ind[2] > dlk + nd[2]: continue
-                        if max_ind[2] < dlk: continue
-                        idim = min(nd[2] - dlk, MAX_EDGE)
-                        gdk = ((dlk  <= right_index[:,2])
-                             & (dlk + idim >= left_index[:,2])
-                             & (gdj))
-                        if not na.any(gdk): continue
-                        
-                        #these are coordinates for yt grid
-                        left = na.array([dli, dlj, dlk])
-                        
-                        #does this ravel really do anything?
-                        domain_left = left.ravel()
-                        
-                        #why are we adding this to zero?
-                        initial_left = na.zeros(3, dtype='int64') + domain_left
-                        
-                        #still not sure why multiplying against one 
-                        #just type casting?
-                        idims = na.ones(3, dtype='int64') * na.minimum(nd - domain_left, MAX_EDGE)
-                        
-                        # We want to find how many grids are inside.
-                        
-                        #this gives us the LE and RE, domain dims,
-                        # and file locations
-                        # for art octs within this grid
-                        dleft_index = left_index[gdk,:]
-                        dright_index = right_index[gdk,:]
-                        ddims = dims[gdk,:]
-                        dfl = fl[gdk,:]
-                        
-                        #create a sub grid composed
-                        #of the new yt grid LE, span,
-                        #and a series of the contained art grid properties:
-                        # left edge, right edge, (not sure what dims is) and file locations
-                        psg = _ramses_reader.ProtoSubgrid(initial_left, idims,
-                                        dleft_index, dfl)
-                        
-                        #print "Gridding from %s to %s + %s" % (
-                        #    initial_left, initial_left, idims)
-                        if psg.efficiency <= 0: continue
-                        self.num_deep = 0
-                        # psgs.extend(self._recursive_patch_splitting(
-                        #     psg, idims, initial_left, 
-                        #     dleft_index, dright_index, ddims, dfl))
-                        
-                        #I'm not sure how this patch splitting process
-                        #does, or how it works
-                        psgs.extend(_ramses_reader.recursive_patch_splitting(
-                            psg, idims, initial_left, dleft_index, dfl))
-                        
-                        # psgs.extend(self._recursive_patch_splitting(
-                        #     psg, idims, initial_left, 
-                        #     dleft_index, dright_index, ddims, dfl))
-                        psgs.extend([psg])
+                tol = 1.00001
+                
+                
+                step+=1
+                pbar.update(step)
+            eff_mean = na.mean(psg_eff)
+            eff_nmin = na.sum([e<=min_eff*tol for e in psg_eff])
+            eff_nall = len(psg_eff)
+            mylog.info("Average subgrid efficiency %02.1f %%",
+                        eff_mean*100.0)
+            mylog.info("%02.1f%% (%i/%i) of grids had minimum efficiency",
+                        eff_nmin*100.0/eff_nall,eff_nmin,eff_nall)
+            
+        
+            mylog.debug("Done with level % 2i", level)
             pbar.finish()
             self.proto_grids.append(psgs)
-            sums = na.zeros(3, dtype='int64')
-            mylog.info("Final grid count: %s", len(self.proto_grids[level]))
+            #print sum(len(psg.grid_file_locations) for psg in psgs)
+            #mylog.info("Final grid count: %s", len(self.proto_grids[level]))
             if len(self.proto_grids[level]) == 1: continue
-            # for g in self.proto_grids[level]:
-            #     sums += [s.sum() for s in g.sigs]
-            # assert(na.all(sums == dims.prod(axis=1).sum()))
         self.num_grids = sum(len(l) for l in self.proto_grids)
+                    
+            
+            
 
     num_deep = 0
 
-    # @num_deep_inc
-    # def _recursive_patch_splitting(self, psg, dims, ind,
-    #         left_index, right_index, gdims, fl):
-    #     min_eff = 0.1 # This isn't always respected.
-    #     if self.num_deep > 40:
-    #         # If we've recursed more than 100 times, we give up.
-    #         psg.efficiency = min_eff
-    #         return [psg]
-    #     if psg.efficiency > min_eff or psg.efficiency < 0.0:
-    #         return [psg]
-    #     tt, ax, fp = psg.find_split()
-    #     if (fp % 2) != 0:
-    #         if dims[ax] != fp + 1:
-    #             fp += 1
-    #         else:
-    #             fp -= 1
-    #     #print " " * self.num_deep + "Got ax", ax, "fp", fp
-    #     dims_l = dims.copy()
-    #     dims_l[ax] = fp
-    #     li_l = ind.copy()
-    #     if na.any(dims_l <= 0): return [psg]
-    #     L = _ramses_reader.ProtoSubgrid(
-    #             li_l, dims_l, left_index, right_index, gdims, fl)
-    #     #print " " * self.num_deep + "L", tt, L.efficiency
-    #     #if L.efficiency > 1.0: raise RuntimeError
-    #     if L.efficiency <= 0.0: L = []
-    #     elif L.efficiency < min_eff:
-    #         L = self._recursive_patch_splitting(L, dims_l, li_l,
-    #                 left_index, right_index, gdims, fl)
-    #     else:
-    #         L = [L]
-    #     dims_r = dims.copy()
-    #     dims_r[ax] -= fp
-    #     li_r = ind.copy()
-    #     li_r[ax] += fp
-    #     if na.any(dims_r <= 0): return [psg]
-    #     R = _ramses_reader.ProtoSubgrid(
-    #             li_r, dims_r, left_index, right_index, gdims, fl)
-    #     #print " " * self.num_deep + "R", tt, R.efficiency
-    #     #if R.efficiency > 1.0: raise RuntimeError
-    #     if R.efficiency <= 0.0: R = []
-    #     elif R.efficiency < min_eff:
-    #         R = self._recursive_patch_splitting(R, dims_r, li_r,
-    #                 left_index, right_index, gdims, fl)
-    #     else:
-    #         R = [R]
-    #     return L + R
         
     def _parse_hierarchy(self):
-        # We have important work to do
+        """ The root grid has no octs except one which is refined.
+        Still, it spans 128 cells along each dimension.
+        Ignore the proto subgrid created for the root grid - it is wrong.
+        """
         grids = []
         gi = 0
+        
         for level, grid_list in enumerate(self.proto_grids):
+            #The root level spans [0,2]
+            #The next level spans [0,256]
+            #The 3rd Level spans up to 128*2^3, etc.
+            #Correct root level to span up to 128
+            correction=1L
+            if level == 0:
+                correction=64L
             for g in grid_list:
                 fl = g.grid_file_locations
-                props = g.get_properties()
+                props = g.get_properties()*correction
                 dds = ((2**level) * self.pf.domain_dimensions).astype("float64")
                 self.grid_left_edge[gi,:] = props[0,:] / dds
                 self.grid_right_edge[gi,:] = props[1,:] / dds
                 self.grid_dimensions[gi,:] = props[2,:]
                 self.grid_levels[gi,:] = level
-                grids.append(self.grid(gi, self, level, fl, props[0,:]))
+                child_mask = na.zeros(props[2,:],'uint8')
+                amr_utils.fill_child_mask(fl,props[0],
+                    self.pf.level_art_child_masks[level],
+                    child_mask)
+                grids.append(self.grid(gi, self, level, fl, 
+                    props*na.array(correction).astype('int64')))
                 gi += 1
         self.grids = na.empty(len(grids), dtype='object')
-        for gi, g in enumerate(grids): self.grids[gi] = g
+        
+
+        if self.pf.file_particle_data:
+            #import pdb; pdb.set_trace()
+            lspecies = self.pf.parameters['lspecies']
+            wspecies = self.pf.parameters['wspecies']
+            Nrow     = self.pf.parameters['Nrow']
+            nstars = lspecies[-1]
+            a = self.pf.parameters['aexpn']
+            hubble = self.pf.parameters['hubble']
+            ud  = self.pf.parameters['r0']*a/hubble #proper Mpc units
+            uv  = self.pf.parameters['v0']/(a**1.0)*1.0e5 #proper cm/s
+            um  = self.pf.parameters['aM0'] #mass units in solar masses
+            um *= 1.989e33 #convert solar masses to grams 
+            pbar = get_pbar("Loading Particles   ",5)
+            self.pf.particle_position,self.pf.particle_velocity = \
+                read_particles(self.pf.file_particle_data,nstars,Nrow)
+            pbar.update(1)
+            npa,npb=0,0
+            npb = lspecies[-1]
+            clspecies = na.concatenate(([0,],lspecies))
+            if self.pf.only_particle_type is not None:
+                npb = lspecies[0]
+                if type(self.pf.only_particle_type)==type(5):
+                    npa = clspecies[self.pf.only_particle_type]
+                    npb = clspecies[self.pf.only_particle_type+1]
+            np = npb-npa
+            self.pf.particle_position   = self.pf.particle_position[npa:npb]
+            #self.pf.particle_position  -= 1.0 #fortran indices start with 0
+            pbar.update(2)
+            self.pf.particle_position  /= self.pf.domain_dimensions #to unitary units (comoving)
+            pbar.update(3)
+            self.pf.particle_velocity   = self.pf.particle_velocity[npa:npb]
+            self.pf.particle_velocity  *= uv #to proper cm/s
+            pbar.update(4)
+            self.pf.particle_type         = na.zeros(np,dtype='uint8')
+            self.pf.particle_mass         = na.zeros(np,dtype='float64')
+            self.pf.particle_mass_initial = na.zeros(np,dtype='float64')-1
+            self.pf.particle_creation_time= na.zeros(np,dtype='float64')-1
+            self.pf.particle_metallicity1 = na.zeros(np,dtype='float64')-1
+            self.pf.particle_metallicity2 = na.zeros(np,dtype='float64')-1
+            self.pf.particle_age          = na.zeros(np,dtype='float64')-1
+            
+            dist = self.pf['cm']/self.pf.domain_dimensions[0]
+            self.pf.conversion_factors['particle_mass'] = 1.0 #solar mass in g
+            self.pf.conversion_factors['particle_mass_initial'] = 1.0 #solar mass in g
+            self.pf.conversion_factors['particle_species'] = 1.0
+            for ax in 'xyz':
+                self.pf.conversion_factors['particle_velocity_%s'%ax] = 1.0
+                #already in unitary units
+                self.pf.conversion_factors['particle_position_%s'%ax] = 1.0 
+            self.pf.conversion_factors['particle_creation_time'] =  31556926.0
+            self.pf.conversion_factors['particle_metallicity']=1.0
+            self.pf.conversion_factors['particle_metallicity1']=1.0
+            self.pf.conversion_factors['particle_metallicity2']=1.0
+            self.pf.conversion_factors['particle_index']=1.0
+            self.pf.conversion_factors['particle_type']=1
+            self.pf.conversion_factors['particle_age']=1
+            self.pf.conversion_factors['Msun'] = 5.027e-34 #conversion to solar mass units
+            
+
+            a,b=0,0
+            for i,(b,m) in enumerate(zip(lspecies,wspecies)):
+                if type(self.pf.only_particle_type)==type(5):
+                    if not i==self.pf.only_particle_type:
+                        continue
+                    self.pf.particle_type += i
+                    self.pf.particle_mass += m*um
+
+                else:
+                    self.pf.particle_type[a:b] = i #particle type
+                    self.pf.particle_mass[a:b] = m*um #mass in solar masses
+                a=b
+            pbar.finish()
+
+            nparticles = [0,]+list(lspecies)
+            for j,np in enumerate(nparticles):
+                mylog.debug('found %i particles of type %i'%(np,j))
+            
+            if self.pf.single_particle_mass:
+                #cast all particle masses to the same mass
+                cast_type = self.pf.single_particle_type
+                
+
+            
+            self.pf.particle_star_index = i
+            
+            do_stars = (self.pf.only_particle_type is None) or \
+                       (self.pf.only_particle_type == -1) or \
+                       (self.pf.only_particle_type == len(lspecies))
+            if self.pf.file_star_data and do_stars: 
+                nstars, mass, imass, tbirth, metallicity1, metallicity2 \
+                     = read_stars(self.pf.file_star_data,nstars,Nrow)
+                nstars = nstars[0] 
+                if nstars > 0 :
+                    n=min(1e2,len(tbirth))
+                    pbar = get_pbar("Stellar Ages        ",n)
+                    sages  = \
+                        b2t(tbirth,n=n,logger=lambda x: pbar.update(x)).astype('float64')
+                    sages *= 1.0e9
+                    sages *= 365*24*3600 #to seconds
+                    sages = self.pf.current_time-sages
+                    self.pf.particle_age[-nstars:] = sages
+                    pbar.finish()
+                    self.pf.particle_metallicity1[-nstars:] = metallicity1
+                    self.pf.particle_metallicity2[-nstars:] = metallicity2
+                    self.pf.particle_mass_initial[-nstars:] = imass*um
+                    self.pf.particle_mass[-nstars:] = mass*um
+
+            done = 0
+            init = self.pf.particle_position.shape[0]
+            pos = self.pf.particle_position
+            #particle indices travel with the particle positions
+            #pos = na.vstack((na.arange(pos.shape[0]),pos.T)).T 
+            #if type(self.pf.grid_particles) == type(5):
+            #    max_level = min(max_level,self.pf.grid_particles)
+            grid_particle_count = na.zeros((len(grids),1),dtype='int64')
+            
+            #grid particles at the finest level, removing them once gridded
+            #pbar = get_pbar("Gridding Particles ",init)
+            #assignment = amr_utils.assign_particles_to_cells(
+            #        self.grid_levels.ravel().astype('int32'),
+            #        self.grid_left_edge.astype('float32'),
+            #        self.grid_right_edge.astype('float32'),
+            #        pos[:,0].astype('float32'),
+            #        pos[:,1].astype('float32'),
+            #        pos[:,2].astype('float32'))
+            #pbar.finish()
+
+            pbar = get_pbar("Gridding Particles ",init)
+            assignment,ilists = amr_utils.assign_particles_to_cell_lists(
+                    self.grid_levels.ravel().astype('int32'),
+                    2, #only bother gridding particles to level 2
+                    self.grid_left_edge.astype('float32'),
+                    self.grid_right_edge.astype('float32'),
+                    pos[:,0].astype('float32'),
+                    pos[:,1].astype('float32'),
+                    pos[:,2].astype('float32'))
+            pbar.finish()
+            
+            
+            pbar = get_pbar("Filling grids ",init)
+            for gidx,(g,ilist) in enumerate(zip(grids,ilists)):
+                np = len(ilist)
+                grid_particle_count[gidx,0]=np
+                g.hierarchy.grid_particle_count = grid_particle_count
+                g.particle_indices = ilist
+                grids[gidx] = g
+                done += np
+                pbar.update(done)
+            pbar.finish()
+
+            #assert init-done== 0 #we have gridded every particle
+            
+        pbar = get_pbar("Finalizing grids ",len(grids))
+        for gi, g in enumerate(grids): 
+            self.grids[gi] = g
+        pbar.finish()
+            
 
     def _get_grid_parents(self, grid, LE, RE):
         mask = na.zeros(self.num_grids, dtype='bool')
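
A minimal standalone sketch of the re-gridding idea used in the hunk above:
order the oct left indices along a space-filling curve so that consecutive
octs are spatially close, then measure how well a candidate patch is filled
before deciding whether to split it.  This is not the _ramses_reader
implementation; it substitutes a Morton (Z-order) key for the true Hilbert
index, and the helper names and sample values are invented.

import numpy as na

def morton_key(left_index, bits=10):
    """Interleave the bits of (i, j, k) into a single integer key."""
    key = na.zeros(left_index.shape[0], dtype='int64')
    for b in range(bits):
        for dim in range(3):
            key |= ((left_index[:, dim] >> b) & 1) << (3 * b + dim)
    return key

def patch_efficiency(left_index):
    """Fraction of the bounding patch volume covered by 2x2x2-cell octs."""
    le = left_index.min(axis=0)
    re = left_index.max(axis=0) + 2      # each oct spans two cells per side
    return 8.0 * left_index.shape[0] / na.prod(re - le)

octs = na.array([[0, 0, 0], [2, 0, 0], [0, 2, 0], [30, 30, 30]], dtype='int64')
octs = octs[na.argsort(morton_key(octs))]   # spatially coherent ordering
print patch_efficiency(octs)                # low efficiency -> worth splitting
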
@@ -429,6 +532,54 @@
             g._setup_dx()
         self.max_level = self.grid_levels.max()
 
+    # def _populate_grid_objects(self):
+    #     mask = na.empty(self.grids.size, dtype='int32')
+    #     pb = get_pbar("Populating grids", len(self.grids))
+    #     for gi,g in enumerate(self.grids):
+    #         pb.update(gi)
+    #         amr_utils.get_box_grids_level(self.grid_left_edge[gi,:],
+    #                             self.grid_right_edge[gi,:],
+    #                             g.Level - 1,
+    #                             self.grid_left_edge, self.grid_right_edge,
+    #                             self.grid_levels, mask)
+    #         parents = self.grids[mask.astype("bool")]
+    #         if len(parents) > 0:
+    #             g.Parent.extend((p for p in parents.tolist()
+    #                     if p.locations[0,0] == g.locations[0,0]))
+    #             for p in parents: p.Children.append(g)
+    #         # Now we do overlapping siblings; note that one has to "win" with
+    #         # siblings, so we assume the lower ID one will "win"
+    #         amr_utils.get_box_grids_level(self.grid_left_edge[gi,:],
+    #                             self.grid_right_edge[gi,:],
+    #                             g.Level,
+    #                             self.grid_left_edge, self.grid_right_edge,
+    #                             self.grid_levels, mask, gi)
+    #         mask[gi] = False
+    #         siblings = self.grids[mask.astype("bool")]
+    #         if len(siblings) > 0:
+    #             g.OverlappingSiblings = siblings.tolist()
+    #         g._prepare_grid()
+    #         g._setup_dx()
+    #     pb.finish()
+    #     self.max_level = self.grid_levels.max()
+
+    def _setup_field_list(self):
+        if self.parameter_file.use_particles:
+            # We know which particle fields will exist -- pending further
+            # changes in the future.
+            for field in art_particle_field_names:
+                def external_wrapper(f):
+                    def _convert_function(data):
+                        return data.convert(f)
+                    return _convert_function
+                cf = external_wrapper(field)
+                # Note that we call add_field on the field_info directly.  This
+                # will allow the same field detection mechanism to work for 1D,
+                # 2D and 3D fields.
+                self.pf.field_info.add_field(field, NullFunc,
+                                             convert_function=cf,
+                                             take_log=True, particle_type=True)
+
     def _setup_derived_fields(self):
         self.derived_field_list = []
 
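
The external_wrapper closure in _setup_field_list above exists to bind the
loop variable at definition time; without it every _convert_function would
end up converting the last field in the loop.  A minimal standalone
illustration of that late-binding pitfall (plain Python; FakeData and the
field names are invented for the example):

fields = ['particle_mass', 'particle_age']

# Without a wrapper, every converter captures the *variable* f, so after the
# loop both closures call data.convert('particle_age').
broken = [lambda data: data.convert(f) for f in fields]

def external_wrapper(f):
    # f is bound as an argument, so each converter keeps its own field name.
    def _convert_function(data):
        return data.convert(f)
    return _convert_function

fixed = [external_wrapper(f) for f in fields]

class FakeData(object):
    def convert(self, name):
        return name

data = FakeData()
print [fn(data) for fn in broken]   # ['particle_age', 'particle_age']
print [fn(data) for fn in fixed]    # ['particle_mass', 'particle_age']
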
@@ -446,16 +597,65 @@
     _handle = None
     
     def __init__(self, filename, data_style='art',
-                 storage_filename = None):
+                 storage_filename = None, 
+                 file_particle_header=None, 
+                 file_particle_data=None,
+                 file_star_data=None,
+                 discover_particles=False,
+                 use_particles=True,
+                 limit_level=None,
+                 only_particle_type = None,
+                 grid_particles=False,
+                 single_particle_mass=False,
+                 single_particle_type=0):
         import yt.frontends.ramses._ramses_reader as _ramses_reader
+        
+        
+        dirn = os.path.dirname(filename)
+        base = os.path.basename(filename)
+        aexp = base.split('_')[2].replace('.d','')
+        
+        self.file_particle_header = file_particle_header
+        self.file_particle_data = file_particle_data
+        self.file_star_data = file_star_data
+        self.only_particle_type = only_particle_type
+        self.grid_particles = grid_particles
+        self.single_particle_mass = single_particle_mass
+        
+        if limit_level is None:
+            self.limit_level = na.inf
+        else:
+            mylog.info("Using maximum level: %i",limit_level)
+            self.limit_level = limit_level
+        
+        if discover_particles:
+            if file_particle_header is None:
+                loc = filename.replace(base,'PMcrd%s.DAT'%aexp)
+                if os.path.exists(loc):
+                    self.file_particle_header = loc
+                    mylog.info("Discovered particle header: %s",os.path.basename(loc))
+            if file_particle_data is None:
+                loc = filename.replace(base,'PMcrs0%s.DAT'%aexp)
+                if os.path.exists(loc):
+                    self.file_particle_data = loc
+                    mylog.info("Discovered particle data:   %s",os.path.basename(loc))
+            if file_star_data is None:
+                loc = filename.replace(base,'stars_%s.dat'%aexp)
+                if os.path.exists(loc):
+                    self.file_star_data = loc
+                    mylog.info("Discovered stellar data:    %s",os.path.basename(loc))
+        
+        self.use_particles = any([self.file_particle_header,
+            self.file_star_data, self.file_particle_data])
         StaticOutput.__init__(self, filename, data_style)
-        self.storage_filename = storage_filename
         
         self.dimensionality = 3
         self.refine_by = 2
         self.parameters["HydroMethod"] = 'art'
         self.parameters["Time"] = 1. # default unit is 1...
         self.parameters["InitialTime"]=self.current_time
+        self.storage_filename = storage_filename
+        
         
     def __repr__(self):
         return self.basename.rsplit(".", 1)[0]
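
The discover_particles branch in __init__ above infers the companion particle
and stellar files from the hydro file name.  A standalone sketch of that
pattern, assuming the usual ART naming with an expansion-factor tag embedded
in the file name (the helper name and the example path are invented):

import os

def discover_art_companions(filename):
    """Guess the particle header, particle data and stellar data files that
    accompany an ART hydro dump, following the same naming pattern as above."""
    base = os.path.basename(filename)
    # e.g. "10MpcBox_csf512_a0.300.d" -> expansion-factor tag "a0.300"
    aexp = base.split('_')[2].replace('.d', '')
    candidates = {'particle_header': 'PMcrd%s.DAT' % aexp,
                  'particle_data':   'PMcrs0%s.DAT' % aexp,
                  'stellar_data':    'stars_%s.dat' % aexp}
    found = {}
    for kind, name in candidates.items():
        loc = filename.replace(base, name)
        if os.path.exists(loc):
            found[kind] = loc
    return found

# Hypothetical usage:
# print discover_art_companions("/data/sim/10MpcBox_csf512_a0.300.d")
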
@@ -471,8 +671,10 @@
         self.conversion_factors = defaultdict(lambda: 1.0)
         self.units['1'] = 1.0
         self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
-
+        
+        
         z = self.current_redshift
+        
         h = self.hubble_constant
         boxcm_cal = self["boxh"]
         boxcm_uncal = boxcm_cal / h
@@ -505,29 +707,35 @@
         # this is 3H0^2 / (8pi*G) *h*Omega0 with H0=100km/s. 
         # ie, critical density 
         self.rho0 = 1.8791e-29 * hubble**2.0 * self.omega_matter
-        self.tr = 2./3. *(3.03e5*self.r0**2.0*wmu*self.omega_matter)*(1.0/(aexpn**2))      
-        self.conversion_factors["Density"] = \
-            self.rho0*(aexpn**-3.0)
-        self.conversion_factors["GasEnergy"] = \
-            self.rho0*self.v0**2*(aexpn**-5.0)
+        self.tr = 2./3. *(3.03e5*self.r0**2.0*wmu*self.omega_matter)*(1.0/(aexpn**2))     
         tr  = self.tr
+        
+        #factors to multiply the native code units by to get CGS
+        self.conversion_factors['Pressure'] = self.parameters["P0"] #already cgs
+        self.conversion_factors['Velocity'] = self.parameters['v0']*1e3 #km/s -> cm/s
+        self.conversion_factors["Mass"] = self.parameters["aM0"] * 1.98892e33
+        self.conversion_factors["Density"] = self.rho0*(aexpn**-3.0)
+        self.conversion_factors["GasEnergy"] = self.rho0*self.v0**2*(aexpn**-5.0)
         self.conversion_factors["Temperature"] = tr
-        self.conversion_factors["Metal_Density"] = 1
+        self.conversion_factors["Potential"] = 1.0
+        self.cosmological_simulation = True
         
         # Now our conversion factors
         for ax in 'xyz':
             # Add on the 1e5 to get to cm/s
             self.conversion_factors["%s-velocity" % ax] = self.v0/aexpn
         seconds = self.t0
-        self.time_units['years'] = seconds / (365*3600*24.0)
-        self.time_units['days']  = seconds / (3600*24.0)
-        self.time_units['Myr'] = self.time_units['years'] / 1.0e6
-        self.time_units['Gyr']  = self.time_units['years'] / 1.0e9
+        self.time_units['Gyr']   = 1.0/(1.0e9*365*3600*24.0)
+        self.time_units['Myr']   = 1.0/(1.0e6*365*3600*24.0)
+        self.time_units['years'] = 1.0/(365*3600*24.0)
+        self.time_units['days']  = 1.0 / (3600*24.0)
+
 
         #we were already in seconds, go back in to code units
-        self.current_time /= self.t0 
+        #self.current_time /= self.t0 
+        #self.current_time = b2t(self.current_time,n=1)
         
-        
+    
     def _parse_parameter_file(self):
         # We set our domain to run from 0 .. 1 since we are otherwise
         # unconstrained.
@@ -594,8 +802,14 @@
         self.parameters["Y_p"] = 0.245
         self.parameters["wmu"] = 4.0/(8.0-5.0*self.parameters["Y_p"])
         self.parameters["gamma"] = 5./3.
+        self.parameters["T_CMB0"] = 2.726  
+        self.parameters["T_min"] = 300.0 #T floor in K
+        self.parameters["boxh"] = header_vals['boxh']
+        self.parameters['ng'] = 128 #number of level 0 cells along one dimension
         self.current_redshift = self.parameters["aexpn"]**-1.0 - 1.0
+        self.parameters['CosmologyInitialRedshift']=self.current_redshift
         self.data_comment = header_vals['jname']
+        self.current_time_raw = header_vals['t']
         self.current_time = header_vals['t']
         self.omega_lambda = header_vals['Oml0']
         self.omega_matter = header_vals['Om0']
@@ -606,26 +820,62 @@
         #nchem is nhydrovars-8, so we typically have 2 extra chem species 
         self.hubble_time  = 1.0/(self.hubble_constant*100/3.08568025e19)
         #self.hubble_time /= 3.168876e7 #Gyr in s 
-        def integrand(x,oml=self.omega_lambda,omb=self.omega_matter):
-            return 1./(x*na.sqrt(oml+omb*x**-3.0))
-        spacings = na.logspace(-5,na.log10(self.parameters['aexpn']),1e5)
-        integrand_arr = integrand(spacings)
-        self.current_time = na.trapz(integrand_arr,dx=na.diff(spacings))
-        self.current_time *= self.hubble_time
-                
+        # def integrand(x,oml=self.omega_lambda,omb=self.omega_matter):
+        #     return 1./(x*na.sqrt(oml+omb*x**-3.0))
+        # spacings = na.logspace(-5,na.log10(self.parameters['aexpn']),1e5)
+        # integrand_arr = integrand(spacings)
+        # self.current_time = na.trapz(integrand_arr,dx=na.diff(spacings))
+        # self.current_time *= self.hubble_time
+        self.current_time = b2t(self.current_time_raw)*1.0e9*365*3600*24         
         for to_skip in ['tl','dtl','tlold','dtlold','iSO']:
             _skip_record(f)
 
+        
+        Om0 = self.parameters['Om0']
+        hubble = self.parameters['hubble']
+        dummy = 100.0 * hubble * na.sqrt(Om0)
+        ng = self.parameters['ng']
+        wmu = self.parameters["wmu"]
+        boxh = header_vals['boxh'] 
+        
+        #distance unit; boxh is in units of h^-1 Mpc
+        self.parameters["r0"] = self.parameters["boxh"] / self.parameters['ng']
+        r0 = self.parameters["r0"]
+        #time unit in years
+        self.parameters["t0"] = 2.0 / dummy * 3.0856e19 / 3.15e7
+        #velocity unit in km/s
+        self.parameters["v0"] = 50.0*self.parameters["r0"]*\
+                na.sqrt(self.parameters["Om0"])
+        #density = 3H0^2 * Om0 / (8*pi*G) - unit of density in Msun/Mpc^3
+        self.parameters["rho0"] = 2.776e11 * hubble**2.0 * Om0
+        rho0 = self.parameters["rho0"]
+        #Pressure = rho0 * v0**2 - unit of pressure in g/cm/s^2
+        self.parameters["P0"] = 4.697e-16 * Om0**2.0 * r0**2.0 * hubble**2.0
+        #T_0 = unit of temperature in K (keV version given below)
+        #T_0 = 2.61155 * r0**2 * wmu * Om0 ! [keV]
+        self.parameters["T_0"] = 3.03e5 * r0**2.0 * wmu * Om0 # [K]
+        #S_0 = unit of entropy in keV * cm^2
+        self.parameters["S_0"] = 52.077 * wmu**(5.0/3.0) * hubble**(-4.0/3.0)*Om0**(1.0/3.0)*r0**2.0
+        
+        #mass conversion (Mbox = rho0 * Lbox^3, Mbox_code = Ng^3)
+        #     for non-cosmological run aM0 must be defined during initialization
+        #     [aM0] = [Msun]
+        self.parameters["aM0"] = rho0 * (boxh/hubble)**3.0 / ng**3.0
+        
+        #CGS for everything in the next block
+    
         (self.ncell,) = struct.unpack('>l', _read_record(f))
         # Try to figure out the root grid dimensions
-        est = na.log2(self.ncell) / 3
-        if int(est) != est: raise RuntimeError
+        est = int(na.rint(self.ncell**(1.0/3.0)))
         # Note here: this is the number of *cells* on the root grid.
         # This is not the same as the number of Octs.
-        self.domain_dimensions = na.ones(3, dtype='int64') * int(2**est)
+        self.domain_dimensions = na.ones(3, dtype='int64')*est 
 
         self.root_grid_mask_offset = f.tell()
-        _skip_record(f) # iOctCh
+        #_skip_record(f) # iOctCh
+        root_cells = self.domain_dimensions.prod()
+        self.root_iOctCh = _read_frecord(f,'>i')[:root_cells]
+        self.root_iOctCh = self.root_iOctCh.reshape(self.domain_dimensions,order='F')
         self.root_grid_offset = f.tell()
         _skip_record(f) # hvar
         _skip_record(f) # var
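
As a concrete check of the unit block above, the same expressions evaluated
for a plausible set of header values (the inputs below are invented sample
numbers, not taken from any real dataset):

import numpy as na

Om0, hubble, boxh, ng = 0.27, 0.70, 20.0, 128
wmu = 4.0 / (8.0 - 5.0 * 0.245)

dummy = 100.0 * hubble * na.sqrt(Om0)
r0 = boxh / ng                                   # h^-1 Mpc per root cell
t0 = 2.0 / dummy * 3.0856e19 / 3.15e7            # time unit in years
v0 = 50.0 * r0 * na.sqrt(Om0)                    # velocity unit in km/s
rho0 = 2.776e11 * hubble ** 2.0 * Om0            # density unit in Msun/Mpc^3
P0 = 4.697e-16 * Om0 ** 2.0 * r0 ** 2.0 * hubble ** 2.0  # pressure unit, cgs
T_0 = 3.03e5 * r0 ** 2.0 * wmu * Om0             # temperature unit in K
aM0 = rho0 * (boxh / hubble) ** 3.0 / ng ** 3.0  # mass unit in Msun

print "r0 = %g h^-1 Mpc, v0 = %g km/s, aM0 = %g Msun" % (r0, v0, aM0)
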
@@ -634,61 +884,71 @@
         self.child_grid_offset = f.tell()
 
         f.close()
+        
+        if self.file_particle_header is not None:
+            self._read_particle_header(self.file_particle_header)
+        
+    def _read_particle_header(self,fn):    
+        """ Reads control information and various parameters from the 
+            particle data set. Adapted from Daniel Ceverino's 
+            Read_Particles_Binary in analysis_ART.F   
+        """ 
+        header_struct = [
+            ('>i','pad'),
+            ('45s','header'), 
+            ('>f','aexpn'),
+            ('>f','aexp0'),
+            ('>f','amplt'),
+            ('>f','astep'),
+
+            ('>i','istep'),
+            ('>f','partw'),
+            ('>f','tintg'),
+
+            ('>f','Ekin'),
+            ('>f','Ekin1'),
+            ('>f','Ekin2'),
+            ('>f','au0'),
+            ('>f','aeu0'),
+
+
+            ('>i','Nrow'),
+            ('>i','Ngridc'),
+            ('>i','Nspecies'),
+            ('>i','Nseed'),
+
+            ('>f','Om0'),
+            ('>f','Oml0'),
+            ('>f','hubble'),
+            ('>f','Wp5'),
+            ('>f','Ocurv'),
+            ('>f','Omb0'),
+            ('>%ds'%(396),'extras'),
+            ('>f','unknown'),
+
+            ('>i','pad')]
+        fh = open(fn,'rb')
+        vals = _read_struct(fh,header_struct)
+        
+        for k,v in vals.iteritems():
+            self.parameters[k]=v
+        
+        seek_extras = 137
+        fh.seek(seek_extras)
+        n = self.parameters['Nspecies']
+        self.parameters['wspecies'] = na.fromfile(fh,dtype='>f',count=10)
+        self.parameters['lspecies'] = na.fromfile(fh,dtype='>i',count=10)
+        self.parameters['wspecies'] = self.parameters['wspecies'][:n]
+        self.parameters['lspecies'] = self.parameters['lspecies'][:n]
+        fh.close()
+        
+        ls_nonzero = [ls for ls in self.parameters['lspecies'] if ls>0 ]
+        mylog.debug("Discovered %i species of particles",len(ls_nonzero))
+        mylog.debug("Particle populations: "+'%1.1e '*len(ls_nonzero), *ls_nonzero)
+        
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
         return False # We make no effort to auto-detect ART data
 
-def _skip_record(f):
-    s = struct.unpack('>i', f.read(struct.calcsize('>i')))
-    f.seek(s[0], 1)
-    s = struct.unpack('>i', f.read(struct.calcsize('>i')))
 
-def _read_record(f):
-    s = struct.unpack('>i', f.read(struct.calcsize('>i')))[0]
-    ss = f.read(s)
-    s = struct.unpack('>i', f.read(struct.calcsize('>i')))
-    return ss
-
-def _read_record_size(f):
-    pos = f.tell()
-    s = struct.unpack('>i', f.read(struct.calcsize('>i')))
-    f.seek(pos)
-    return s[0]
-
-def _count_art_octs(f, offset,
-                   MinLev, MaxLevelNow):
-    import gc
-    f.seek(offset)
-    nchild,ntot=8,0
-    Level = na.zeros(MaxLevelNow+1 - MinLev, dtype='i')
-    iNOLL = na.zeros(MaxLevelNow+1 - MinLev, dtype='i')
-    iHOLL = na.zeros(MaxLevelNow+1 - MinLev, dtype='i')
-    for Lev in xrange(MinLev + 1, MaxLevelNow+1):
-        #Get the info for this level, skip the rest
-        #print "Reading oct tree data for level", Lev
-        #print 'offset:',f.tell()
-        Level[Lev], iNOLL[Lev], iHOLL[Lev] = struct.unpack(
-           '>iii', _read_record(f))
-        print 'Level %i : '%Lev, iNOLL
-        #print 'offset after level record:',f.tell()
-        iOct = iHOLL[Lev] - 1
-        nLevel = iNOLL[Lev]
-        nLevCells = nLevel * nchild
-        ntot = ntot + nLevel
-
-        #Skip all the oct hierarchy data
-        ns = _read_record_size(f)
-        size = struct.calcsize('>i') + ns + struct.calcsize('>i')
-        f.seek(f.tell()+size * nLevel)
-        
-        #Skip the child vars data
-        ns = _read_record_size(f)
-        size = struct.calcsize('>i') + ns + struct.calcsize('>i')
-        f.seek(f.tell()+size * nLevel*nchild)
-        
-        #find nhydrovars
-        nhydrovars = 8+2
-    f.seek(offset)
-    return nhydrovars, iNOLL
-


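
All of the ART readers in the file above (_read_record, _skip_record,
_read_frecord, _read_struct) rely on the Fortran unformatted convention that
each record is framed by a big-endian 4-byte byte count on both sides.  A
minimal standalone sketch of that framing (the helper names and the example
file are invented):

import struct
import numpy as na

def read_fortran_record(f):
    """Read one Fortran unformatted record: length, payload, length."""
    (nbytes,) = struct.unpack('>i', f.read(4))
    payload = f.read(nbytes)
    (nbytes_end,) = struct.unpack('>i', f.read(4))
    assert nbytes == nbytes_end     # the two framing counts must agree
    return payload

def read_fortran_array(f, dtype='>f'):
    """Read one record and reinterpret its payload as a big-endian array."""
    return na.fromstring(read_fortran_record(f), dtype=dtype)

# Hypothetical usage:
# f = open('PMcrda0.300.DAT', 'rb')
# header = read_fortran_record(f)
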
diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/frontends/art/definitions.py
--- a/yt/frontends/art/definitions.py
+++ b/yt/frontends/art/definitions.py
@@ -0,0 +1,43 @@
+"""
+Definitions specific to ART
+
+Author: Christopher E. Moody <cemoody at ucsc.ed>
+Affiliation: UC Santa Cruz
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2011 Christopher E. Moody.  All Rights
+  Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+
+art_particle_field_names = [
+'particle_age',
+'particle_index',
+'particle_mass',
+'particle_mass_initial',
+'particle_creation_time',
+'particle_metallicity1',
+'particle_metallicity2',
+'particle_metallicity',
+'particle_position_x',
+'particle_position_y',
+'particle_position_z',
+'particle_velocity_x',
+'particle_velocity_y',
+'particle_velocity_z',
+'particle_type']


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -37,32 +37,55 @@
 from yt.utilities.physical_constants import \
     boltzmann_constant_cgs, mass_hydrogen_cgs
 
+KnownARTFields = FieldInfoContainer()
+add_art_field = KnownARTFields.add_field
+
 ARTFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
 add_field = ARTFieldInfo.add_field
 
-KnownARTFields = FieldInfoContainer()
-add_art_field = KnownARTFields.add_field
+import numpy as na
 
-translation_dict = {"Density":"density",
-                    "TotalEnergy":"TotalEnergy",
-                    "x-velocity":"velocity_x",
-                    "y-velocity":"velocity_y",
-                    "z-velocity":"velocity_z",
-                    "Pressure":"pressure",
-                    "Metallicity":"metallicity",
-                    "GasEnergy":"GasEnergy"
-                   }
+#these are just the hydro fields
+known_art_fields = [ 'Density','TotalEnergy',
+                     'XMomentumDensity','YMomentumDensity','ZMomentumDensity',
+                     'Pressure','Gamma','GasEnergy',
+                     'MetalDensitySNII', 'MetalDensitySNIa',
+                     'PotentialNew','PotentialOld']
 
-for f,v in translation_dict.items():
-    add_art_field(v, function=NullFunc, take_log=False,
-                  validators = [ValidateDataField(v)])
-    add_art_field(f, function=TranslationFunc(v), take_log=True)
+#Add the fields, then later we'll individually define units and names
+for f in known_art_fields:
+    add_art_field(f, function=NullFunc, take_log=True,
+              validators = [ValidateDataField(f)])
 
-#def _convertMetallicity(data):
-#    return data.convert("Metal_Density1")
-#KnownARTFields["Metal_Density1"]._units = r"1"
-#KnownARTFields["Metal_Density1"]._projected_units = r"1"
-#KnownARTFields["Metal_Density1"]._convert_function=_convertMetallicity
+#Hydro Fields that are verified to be OK unit-wise:
+#Density
+#Temperature
+
+#Hydro Fields that need to be tested:
+#TotalEnergy
+#XYZMomentum
+#Pressure
+#Gamma
+#GasEnergy
+#MetalDensity SNII + SNia
+#Potentials
+
+#Hydro Derived fields that are untested:
+#metallicities
+#xyzvelocity
+
+#Particle fields that are tested:
+#particle_position_xyz
+#particle_type
+#particle_index
+#particle_mass
+#particle_mass_initial
+#particle_age
+#particle_velocity
+#particle_metallicity12
+
+#Particle fields that are untested:
+#NONE
 
 
 def _convertDensity(data):
@@ -71,55 +94,143 @@
 KnownARTFields["Density"]._projected_units = r"\rm{g}/\rm{cm}^2"
 KnownARTFields["Density"]._convert_function=_convertDensity
 
-def _convertEnergy(data):
+def _convertTotalEnergy(data):
+    return data.convert("GasEnergy")
+KnownARTFields["TotalEnergy"]._units = r"\rm{g}/\rm{cm}^3"
+KnownARTFields["TotalEnergy"]._projected_units = r"\rm{K}"
+KnownARTFields["TotalEnergy"]._convert_function=_convertTotalEnergy
+
+def _convertXMomentumDensity(data):
+    tr  = data.convert("Mass")*data.convert("Velocity")
+    tr *= (data.convert("Density")/data.convert("Mass"))
+    return tr
+KnownARTFields["XMomentumDensity"]._units = r"\rm{mg}/\rm{s}/\rm{cm}^3"
+KnownARTFields["XMomentumDensity"]._projected_units = r"\rm{K}"
+KnownARTFields["XMomentumDensity"]._convert_function=_convertXMomentumDensity
+
+def _convertYMomentumDensity(data):
+    tr  = data.convert("Mass")*data.convert("Velocity")
+    tr *= (data.convert("Density")/data.convert("Mass"))
+    return tr
+KnownARTFields["YMomentumDensity"]._units = r"\rm{mg}/\rm{s}/\rm{cm}^3"
+KnownARTFields["YMomentumDensity"]._projected_units = r"\rm{K}"
+KnownARTFields["YMomentumDensity"]._convert_function=_convertYMomentumDensity
+
+def _convertZMomentumDensity(data):
+    tr  = data.convert("Mass")*data.convert("Velocity")
+    tr *= (data.convert("Density")/data.convert("Mass"))
+    return tr
+KnownARTFields["ZMomentumDensity"]._units = r"\rm{mg}/\rm{s}/\rm{cm}^3"
+KnownARTFields["ZMomentumDensity"]._projected_units = r"\rm{K}"
+KnownARTFields["ZMomentumDensity"]._convert_function=_convertZMomentumDensity
+
+def _convertPressure(data):
+    return data.convert("Pressure")
+KnownARTFields["Pressure"]._units = r"\rm{g}/\rm{cm}/\rm{s}^2"
+KnownARTFields["Pressure"]._projected_units = r"\rm{g}/\rm{s}^2"
+KnownARTFields["Pressure"]._convert_function=_convertPressure
+
+def _convertGamma(data):
+    return 1.0
+KnownARTFields["Gamma"]._units = r""
+KnownARTFields["Gamma"]._projected_units = r""
+KnownARTFields["Gamma"]._convert_function=_convertGamma
+
+def _convertGasEnergy(data):
     return data.convert("GasEnergy")
 KnownARTFields["GasEnergy"]._units = r"\rm{ergs}/\rm{g}"
-KnownARTFields["GasEnergy"]._convert_function=_convertEnergy
+KnownARTFields["GasEnergy"]._projected_units = r""
+KnownARTFields["GasEnergy"]._convert_function=_convertGasEnergy
 
-def _Temperature(field, data):
-    tr  = data["GasEnergy"] / data["Density"]
-    tr /= data.pf.conversion_factors["GasEnergy"]
-    tr *= data.pf.conversion_factors["Density"]
+def _convertMetalDensitySNII(data):
+    return data.convert("Density")
+KnownARTFields["MetalDensitySNII"]._units = r"\rm{g}/\rm{cm}^3"
+KnownARTFields["MetalDensitySNII"]._projected_units = r"\rm{g}/\rm{cm}^2"
+KnownARTFields["MetalDensitySNII"]._convert_function=_convertMetalDensitySNII
+
+def _convertMetalDensitySNIa(data):
+    return data.convert("Density")
+KnownARTFields["MetalDensitySNIa"]._units = r"\rm{g}/\rm{cm}^3"
+KnownARTFields["MetalDensitySNIa"]._projected_units = r"\rm{g}/\rm{cm}^2"
+KnownARTFields["MetalDensitySNIa"]._convert_function=_convertMetalDensitySNIa
+
+def _convertPotentialNew(data):
+    return data.convert("Potential")
+KnownARTFields["PotentialNew"]._units = r"\rm{g}/\rm{cm}^3"
+KnownARTFields["PotentialNew"]._projected_units = r"\rm{g}/\rm{cm}^2"
+KnownARTFields["PotentialNew"]._convert_function=_convertPotentialNew
+
+def _convertPotentialOld(data):
+    return data.convert("Potential")
+KnownARTFields["PotentialOld"]._units = r"\rm{g}/\rm{cm}^3"
+KnownARTFields["PotentialOld"]._projected_units = r"\rm{g}/\rm{cm}^2"
+KnownARTFields["PotentialOld"]._convert_function=_convertPotentialOld
+
+####### Derived fields
+
+def _temperature(field, data):
+    tr  = data["GasEnergy"].astype('float64') #~1
+    d = data["Density"].astype('float64')
+    d[d==0.0] = -1.0 #replace the zeroes (that cause infs)
+    tr /= d #
+    assert na.all(na.isfinite(tr)) #diagnosing some problem...
     return tr
-def _convertTemperature(data):
-    return data.convert("Temperature")
-add_art_field("Temperature", function=_Temperature, units = r"\mathrm{K}")
+def _converttemperature(data):
+    x  = data.pf.conversion_factors["Density"]
+    x /= data.pf.conversion_factors["GasEnergy"]
+    x *= data.pf.conversion_factors["Temperature"]
+    return x
+add_art_field("Temperature", function=_temperature, units = r"\mathrm{K}",take_log=True)
 KnownARTFields["Temperature"]._units = r"\mathrm{K}"
-KnownARTFields["Temperature"]._convert_function=_convertTemperature
+KnownARTFields["Temperature"]._projected_units = r"\mathrm{K}"
+KnownARTFields["Temperature"]._convert_function=_converttemperature
 
-def _MetallicitySNII(field, data):
-    #get the dimensionless mass fraction
-    tr  = data["Metal_DensitySNII"] / data["Density"]
-    tr *= data.pf.conversion_factors["Density"]    
+def _metallicity_snII(field, data):
+    tr  = data["MetalDensitySNII"] / data["Density"]
     return tr
-    
-add_art_field("MetallicitySNII", function=_MetallicitySNII, units = r"\mathrm{K}")
-KnownARTFields["MetallicitySNII"]._units = r"\mathrm{K}"
+add_art_field("Metallicity_SNII", function=_metallicity_snII, units = r"\mathrm{K}",take_log=True)
+KnownARTFields["Metallicity_SNII"]._units = r""
+KnownARTFields["Metallicity_SNII"]._projected_units = r""
 
-def _MetallicitySNIa(field, data):
-    #get the dimensionless mass fraction
-    tr  = data["Metal_DensitySNIa"] / data["Density"]
-    tr *= data.pf.conversion_factors["Density"]    
+def _metallicity_snIa(field, data):
+    tr  = data["MetalDensitySNIa"] / data["Density"]
     return tr
-    
-add_art_field("MetallicitySNIa", function=_MetallicitySNIa, units = r"\mathrm{K}")
-KnownARTFields["MetallicitySNIa"]._units = r"\mathrm{K}"
+add_art_field("Metallicity_SNIa", function=_metallicity_snIa, units = r"\mathrm{K}",take_log=True)
+KnownARTFields["Metallicity_SNIa"]._units = r""
+KnownARTFields["Metallicity_SNIa"]._projected_units = r""
 
-def _Metallicity(field, data):
-    #get the dimensionless mass fraction of the total metals
-    tr  = data["Metal_DensitySNIa"] / data["Density"]
-    tr += data["Metal_DensitySNII"] / data["Density"]
-    tr *= data.pf.conversion_factors["Density"]    
+def _x_velocity(field, data):
+    tr  = data["XMomentumDensity"]/data["Density"]
     return tr
-    
-add_art_field("Metallicity", function=_Metallicity, units = r"\mathrm{K}")
-KnownARTFields["Metallicity"]._units = r"\mathrm{K}"
+add_field("x_velocity", function=_x_velocity, units = r"\mathrm{cm/s}",take_log=False)
+ARTFieldInfo["x_velocity"]._units = r"\rm{cm}/\rm{s}"
+ARTFieldInfo["x_velocity"]._projected_units = r"\rm{cm}/\rm{s}"
 
-def _Metal_Density(field,data):
-    return data["Metal_DensitySNII"]+data["Metal_DensitySNIa"]
-def _convert_Metal_Density(data):
-    return data.convert("Metal_Density")
+def _y_velocity(field, data):
+    tr  = data["YMomentumDensity"]/data["Density"]
+    return tr
+add_field("y_velocity", function=_y_velocity, units = r"\mathrm{cm/s}",take_log=False)
+ARTFieldInfo["y_velocity"]._units = r"\rm{cm}/\rm{s}"
+ARTFieldInfo["y_velocity"]._projected_units = r"\rm{cm}/\rm{s}"
 
-add_art_field("Metal_Density", function=_Metal_Density, units = r"\mathrm{K}")
-KnownARTFields["Metal_Density"]._units = r"\mathrm{K}"
-KnownARTFields["Metal_Density"]._convert_function=_convert_Metal_Density
+def _z_velocity(field, data):
+    tr  = data["ZMomentumDensity"]/data["Density"]
+    return tr
+add_field("z_velocity", function=_z_velocity, units = r"\mathrm{cm/s}",take_log=False)
+ARTFieldInfo["z_velocity"]._units = r"\rm{cm}/\rm{s}"
+ARTFieldInfo["z_velocity"]._projected_units = r"\rm{cm}/\rm{s}"
+
+
+def _metal_density(field, data):
+    tr  = data["MetalDensitySNIa"]
+    tr += data["MetalDensitySNII"]
+    return tr
+add_art_field("Metal_Density", function=_metal_density, units = r"\mathrm{K}",take_log=True)
+KnownARTFields["Metal_Density"]._units = r""
+KnownARTFields["Metal_Density"]._projected_units = r""
+
+
+#Particle fields
+
+#Derived particle fields
+


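
The derived fields above are simple ratios and sums of the stored hydro
variables.  A small numpy-only check of that arithmetic with invented sample
values (no yt machinery involved):

import numpy as na

density = na.array([1.0, 2.0, 4.0])
x_momentum_density = na.array([0.5, 1.0, 8.0])
metal_density_snII = na.array([0.01, 0.04, 0.4])
metal_density_snIa = na.array([0.01, 0.02, 0.1])

x_velocity = x_momentum_density / density        # -> [0.5, 0.5, 2.0]
metallicity_snII = metal_density_snII / density  # dimensionless mass fraction
metal_density = metal_density_snII + metal_density_snIa

print x_velocity, metallicity_snII, metal_density
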
diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -25,16 +25,19 @@
 
 import numpy as na
 import struct
-import pdb
+
+import os
+import os.path
 
 from yt.utilities.io_handler import \
     BaseIOHandler
-import numpy as na
 
 from yt.utilities.io_handler import \
     BaseIOHandler
 import yt.utilities.amr_utils as au
 
+from yt.frontends.art.definitions import art_particle_field_names
+
 class IOHandlerART(BaseIOHandler):
     _data_style = "art"
 
@@ -47,7 +50,41 @@
         self.level_offsets = level_offsets
         self.level_data = {}
 
-    def preload_level(self, level):
+    def preload_level(self, level,field=None):
+        """ Reads in the full ART tree. From the ART source:
+            iOctLv :    >0   - level of an oct
+            iOctPr :         - parent of an oct
+            iOctCh :    >0   - pointer to an oct of children
+                        0   - there are no children; the cell is a leaf
+            iOctNb :    >0   - pointers to neighbouring cells 
+            iOctPs :         - coordinates of Oct centers
+            
+            iOctLL1:         - doubly linked list of octs
+            iOctLL2:         - doubly linked list of octs
+            
+            tl - current  time moment for level L
+            tlold - previous time moment for level L
+            dtl - dtime0/2**iTimeBin
+            dtlold -  previous time step for level L
+            iSO - sweep order
+            
+            hvar(1,*) - gas density 
+            hvar(2,*) - gas energy 
+            hvar(3,*) - x-momentum 
+            hvar(4,*) - y-momentum
+            hvar(5,*) - z-momentum
+            hvar(6,*) - pressure
+            hvar(7,*) - Gamma
+            hvar(8,*) - internal energy 
+
+            var (1,*) - total density 
+            var (2,*) - potential (new)
+            var (3,*) - potential (old)
+            
+            
+            
+        """
+        
         if level in self.level_data: return
         if level == 0:
             self.preload_root_level()
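
The docstring above fixes the order of the per-cell hydro and gravity
variables; written out as a 0-based lookup table (the dict names are invented
for this sketch, and the on-disk records use 1-based Fortran indexing):

ART_HVAR_LAYOUT = {0: 'gas density',
                   1: 'gas energy',
                   2: 'x-momentum',
                   3: 'y-momentum',
                   4: 'z-momentum',
                   5: 'pressure',
                   6: 'Gamma',
                   7: 'internal energy'}
# Per comments elsewhere in this changeset, two extra chemistry columns
# (MetalDensitySNII, MetalDensitySNIa) typically follow at indices 8 and 9.
ART_VAR_LAYOUT = {0: 'total density',
                  1: 'potential (new)',
                  2: 'potential (old)'}
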
@@ -58,44 +95,88 @@
         nvals = ncells * (self.nhydro_vars + 6) # 2 vars, 2 pads
         arr = na.fromfile(f, dtype='>f', count=nvals)
         arr = arr.reshape((self.nhydro_vars+6, ncells), order="F")
-        arr = arr[3:-1,:].astype("float64")
-        self.level_data[level] = arr
+        assert na.all(arr[0,:]==arr[-1,:]) #pads must be equal
+        arr = arr[3:-1,:] #skip beginning pad, idc, iOctCh, + ending pad
+        if field==None:
+            self.level_data[level] = arr.astype('float32')
+        else:
+            self.level_data[level] = arr.astype('float32')
+        del arr
 
     def preload_root_level(self):
         f = open(self.filename, 'rb')
         f.seek(self.level_offsets[0] + 4) # Ditch the header
         ncells = self.level_info[0]
-        #pdb.set_trace()
         nhvals = ncells * (self.nhydro_vars) # 0 vars, 0 pads
-        hvar = na.fromfile(f, dtype='>f', count=nhvals).astype("float64")
+        hvar = na.fromfile(f, dtype='>f', count=nhvals).astype("float32")
         hvar = hvar.reshape((self.nhydro_vars, ncells), order="F")
         na.fromfile(f,dtype='>i',count=2) #throw away the pads
         nvars = ncells * (2) # 0 vars, 0 pads
-        var = na.fromfile(f, dtype='>f', count=nvars).astype("float64")
+        var = na.fromfile(f, dtype='>f', count=nvars).astype("float32")
         var = var.reshape((2, ncells), order="F")
         arr = na.concatenate((hvar,var))
         self.level_data[0] = arr
 
     def clear_level(self, level):
         self.level_data.pop(level, None)
+
+    def _read_particle_field(self, grid, field):
+        #This will be cleaned up later
+        idx = na.array(grid.particle_indices)
+        if field == 'particle_index':
+            return na.array(idx)
+        if field == 'particle_type':
+            return grid.pf.particle_type[idx]
+        if field == 'particle_position_x':
+            return grid.pf.particle_position[idx][:,0]
+        if field == 'particle_position_y':
+            return grid.pf.particle_position[idx][:,1]
+        if field == 'particle_position_z':
+            return grid.pf.particle_position[idx][:,2]
+        if field == 'particle_mass':
+            return grid.pf.particle_mass[idx]
+        if field == 'particle_velocity_x':
+            return grid.pf.particle_velocity[idx][:,0]
+        if field == 'particle_velocity_y':
+            return grid.pf.particle_velocity[idx][:,1]
+        if field == 'particle_velocity_z':
+            return grid.pf.particle_velocity[idx][:,2]
+        
+        #stellar fields
+        if field == 'particle_age':
+            return grid.pf.particle_age[idx]
+        if field == 'particle_metallicity':
+            return grid.pf.particle_metallicity1[idx] +\
+                   grid.pf.particle_metallicity2[idx]
+        if field == 'particle_metallicity1':
+            return grid.pf.particle_metallicity1[idx]
+        if field == 'particle_metallicity2':
+            return grid.pf.particle_metallicity2[idx]
+        if field == 'particle_mass_initial':
+            return grid.pf.particle_mass_initial[idx]
+        
+        raise KeyError("Should have matched one of the particle fields...")
+
         
     def _read_data_set(self, grid, field):
+        if field in art_particle_field_names:
+            return self._read_particle_field(grid, field)
         pf = grid.pf
         field_id = grid.pf.h.field_list.index(field)
         if grid.Level == 0: # We only have one root grid
             self.preload_level(0)
             tr = self.level_data[0][field_id,:].reshape(
                     pf.domain_dimensions, order="F").copy()
-            return tr.swapaxes(0, 2)
-        tr = na.zeros(grid.ActiveDimensions, dtype='float64')
-        filled = na.zeros(grid.ActiveDimensions, dtype='int32')
-        to_fill = grid.ActiveDimensions.prod()
+            return tr.swapaxes(0, 2).astype("float64")
+        tr = na.zeros(grid.ActiveDimensions, dtype='float32')
         grids = [grid]
         l_delta = 0
+        filled = na.zeros(grid.ActiveDimensions, dtype='uint8')
+        to_fill = grid.ActiveDimensions.prod()
         while to_fill > 0 and len(grids) > 0:
             next_grids = []
             for g in grids:
-                self.preload_level(g.Level)
+                self.preload_level(g.Level,field=field_id)
                 #print "Filling %s from %s (%s)" % (grid, g, g.Level)
                 to_fill -= au.read_art_grid(field_id, 
                         grid.get_global_startindex(), grid.ActiveDimensions,
@@ -104,11 +185,294 @@
                 next_grids += g.Parent
             grids = next_grids
             l_delta += 1
-        return tr
+        return tr.astype("float64")
 
     def _read_data_slice(self, grid, field, axis, coord):
         sl = [slice(None), slice(None), slice(None)]
         sl[axis] = slice(coord, coord + 1)
         return self._read_data_set(grid, field)[sl]
 
+def _count_art_octs(f, offset, 
+                   MinLev, MaxLevelNow):
+    level_oct_offsets= [0,]
+    level_child_offsets= [0,]
+    f.seek(offset)
+    nchild,ntot=8,0
+    Level = na.zeros(MaxLevelNow+1 - MinLev, dtype='i')
+    iNOLL = na.zeros(MaxLevelNow+1 - MinLev, dtype='i')
+    iHOLL = na.zeros(MaxLevelNow+1 - MinLev, dtype='i')
+    for Lev in xrange(MinLev + 1, MaxLevelNow+1):
+        level_oct_offsets.append(f.tell())
 
+        #Get the info for this level, skip the rest
+        #print "Reading oct tree data for level", Lev
+        #print 'offset:',f.tell()
+        Level[Lev], iNOLL[Lev], iHOLL[Lev] = struct.unpack(
+           '>iii', _read_record(f))
+        #print 'Level %i : '%Lev, iNOLL
+        #print 'offset after level record:',f.tell()
+        iOct = iHOLL[Lev] - 1
+        nLevel = iNOLL[Lev]
+        nLevCells = nLevel * nchild
+        ntot = ntot + nLevel
+
+        #Skip all the oct hierarchy data
+        ns = _read_record_size(f)
+        size = struct.calcsize('>i') + ns + struct.calcsize('>i')
+        f.seek(f.tell()+size * nLevel)
+
+        level_child_offsets.append(f.tell())
+        #Skip the child vars data
+        ns = _read_record_size(f)
+        size = struct.calcsize('>i') + ns + struct.calcsize('>i')
+        f.seek(f.tell()+size * nLevel*nchild)
+
+        #find nhydrovars
+        nhydrovars = 8+2
+    f.seek(offset)
+    return nhydrovars, iNOLL, level_oct_offsets, level_child_offsets
+
+def _read_art_level_info(f, level_oct_offsets,level,root_level=15):
+    pos = f.tell()
+    f.seek(level_oct_offsets[level])
+    #Get the info for this level, skip the rest
+    junk, nLevel, iOct = struct.unpack(
+       '>iii', _read_record(f))
+    
+    #fortran indices start at 1
+    
+    #Skip all the oct hierarchy data
+    le     = na.zeros((nLevel,3),dtype='int64')
+    fl     = na.ones((nLevel,6),dtype='int64')
+    iocts  = na.zeros(nLevel+1,dtype='int64')
+    idxa,idxb = 0,0
+    chunk = long(1e6) #~111MB per chunk: 1e6 rows x 15 columns of 64-bit values
+    left = nLevel
+    while left > 0 :
+        this_chunk = min(chunk,left)
+        idxb=idxa+this_chunk
+        data = na.fromfile(f,dtype='>i',count=this_chunk*15)
+        data=data.reshape(this_chunk,15)
+        left-=this_chunk
+        le[idxa:idxb,:] = data[:,1:4]
+        fl[idxa:idxb,1] = na.arange(idxa,idxb)
+        #pad byte is last, LL2, then ioct right before it
+        iocts[idxa:idxb] = data[:,-3] 
+        idxa=idxa+this_chunk
+    del data
+    
+    #ioct always represents the index of the next variable
+    #not the current, so shift forward one index
+    #the last index isn't used
+    ioctso = iocts.copy()
+    iocts[1:]=iocts[:-1] #shift
+    iocts = iocts[:nLevel] #chop off the last index
+    iocts[0]=iOct #starting value
+
+    #now shift iocts to zero-based indexing (fortran indices start at 1)
+    iocts = iocts-1
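+    #e.g. raw iocts (a, b, c, ...) with starting value iOct become
+    #(iOct, a, b, ...) after the shift, then zero-based after the -1 above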
+
+    assert na.unique(iocts).shape[0] == nLevel
+    
+    #ioct tries to access arrays much larger than le & fl
+    #just make sure they appear in the right order, skipping
+    #the empty space in between
+    idx = na.argsort(iocts)
+    
+    #now rearrange le & fl in order of the ioct
+    le = le[idx]
+    fl = fl[idx]
+
+    #left edges are expressed as if they were on 
+    #level 15, so no matter what level max(le)=2**15 
+    #correct to the yt convention
+    #le = le/2**(root_level-1-level)-1
+
+    #try without the -1
+    le = le/2**(root_level-2-level)-1
+
+    #now read the hvars and vars arrays
+    #we are looking for iOctCh
+    #we record whether iOctCh is > 0, in which case the oct is subdivided
+    iOctCh  = na.zeros((nLevel+1,8),dtype='bool')
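+    #(allocated as a placeholder; this routine does not fill it in -- the
+    # per-cell refinement flags are read separately, e.g. in _read_child_mask_level)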
+    
+    
+    
+    f.seek(pos)
+    return le,fl,nLevel
+
+
+def read_particles(file,nstars,Nrow):
+    words = 6 # words (reals) per particle: x,y,z,vx,vy,vz
+    real_size = 4 # for file_particle_data; not always true?
+    np = nstars # number of particles including stars, should come from lspecies[-1]
+    np_per_page = Nrow**2 # defined in ART a_setup.h
+    num_pages = os.path.getsize(file)/(real_size*words*np_per_page)
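+    #e.g. Nrow = 256 gives 65536 particles per page; the integer division above
+    #assumes the file holds a whole number of pages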
+
+    f = na.fromfile(file, dtype='>f4').astype('float32') # direct access
+    pages = na.vsplit(na.reshape(f, (num_pages, words, np_per_page)), num_pages)
+    data = na.squeeze(na.dstack(pages)).T # x,y,z,vx,vy,vz
+    return data[:,0:3],data[:,3:]
+
+def read_stars(file,nstars,Nrow):
+    fh = open(file,'rb')
+    tdum,adum   = _read_frecord(fh,'>d')
+    nstars      = _read_frecord(fh,'>i')
+    ws_old, ws_oldi = _read_frecord(fh,'>d')
+    mass    = _read_frecord(fh,'>f') 
+    imass   = _read_frecord(fh,'>f') 
+    tbirth  = _read_frecord(fh,'>f')
+    metallicity1 = metallicity2 = None #not present in all outputs
+    if fh.tell() < os.path.getsize(file):
+        metallicity1 = _read_frecord(fh,'>f')
+    if fh.tell() < os.path.getsize(file):
+        metallicity2 = _read_frecord(fh,'>f')
+    assert fh.tell() == os.path.getsize(file)
+    return nstars, mass, imass, tbirth, metallicity1, metallicity2
+
+def _read_child_mask_level(f, level_child_offsets,level,nLevel,nhydro_vars):
+    f.seek(level_child_offsets[level])
+    nvals = nLevel * (nhydro_vars + 6) # 2 vars, 2 pads
+    ioctch = na.zeros(nLevel,dtype='uint8')
+    idc = na.zeros(nLevel,dtype='int32')
+    
+    chunk = long(1e6)
+    left = nLevel
+    width = nhydro_vars+6
+    a,b=0,0
+    while left > 0:
+        chunk = min(chunk,left)
+        b += chunk
+        arr = na.fromfile(f, dtype='>i', count=chunk*width)
+        arr = arr.reshape((width, chunk), order="F")
+        assert na.all(arr[0,:]==arr[-1,:]) #pads must be equal
+        idc[a:b]    = arr[1,:]-1 #fix fortran indexing
+        ioctch[a:b] = arr[2,:]==0 #if it is above zero, then refined info available
+        #zero in the mask means there is refinement available
+        a=b
+        left -= chunk
+    assert left==0
+    return idc,ioctch
+    
+nchem=8+2
+dtyp = na.dtype(">i4,>i8,>i8"+",>%sf4"%(nchem)+ \
+                ",>%sf4"%(2)+",>i4")
+def _read_art_child(f, level_child_offsets,level,nLevel,field):
+    pos=f.tell()
+    f.seek(level_child_offsets[level])
+    arr = na.fromfile(f, dtype='>f', count=nLevel * 8)
+    arr = arr.reshape((nLevel,16), order="F")
+    arr = arr[3:-1,:].astype("float64")
+    f.seek(pos)
+    return arr[field,:]
+
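+#The record helpers below handle Fortran unformatted records, which are stored
+#as <4-byte big-endian length><payload><4-byte length repeated>.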
+def _skip_record(f):
+    s = struct.unpack('>i', f.read(struct.calcsize('>i')))
+    f.seek(s[0], 1)
+    s = struct.unpack('>i', f.read(struct.calcsize('>i')))
+
+def _read_frecord(f,fmt):
+    s1 = struct.unpack('>i', f.read(struct.calcsize('>i')))[0]
+    count = s1/na.dtype(fmt).itemsize
+    ss = na.fromfile(f,fmt,count=count)
+    s2 = struct.unpack('>i', f.read(struct.calcsize('>i')))[0]
+    assert s1==s2
+    return ss
+
+
+def _read_record(f,fmt=None):
+    s = struct.unpack('>i', f.read(struct.calcsize('>i')))[0]
+    ss = f.read(s)
+    s = struct.unpack('>i', f.read(struct.calcsize('>i')))
+    if fmt is not None:
+        return struct.unpack(fmt, ss)
+    return ss
+
+def _read_record_size(f):
+    pos = f.tell()
+    s = struct.unpack('>i', f.read(struct.calcsize('>i')))
+    f.seek(pos)
+    return s[0]
+
+def _read_struct(f,structure,verbose=False):
+    vals = {}
+    for format,name in structure:
+        size = struct.calcsize(format)
+        (val,) = struct.unpack(format,f.read(size))
+        vals[name] = val
+        if verbose: print "%s:\t%s\t (%d B)" %(name,val,f.tell())
+    return vals
+
+
+
+#All of these functions are to convert from hydro time var to 
+#proper time
+sqrt = na.sqrt
+sign = na.sign
+
+def find_root(f,a,b,tol=1e-6):
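+    #simple bisection: f(a) and f(b) must bracket a root (see the assert below)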
+    c = (a+b)/2.0
+    last = -na.inf
+    assert(sign(f(a)) != sign(f(b)))  
+    while na.abs(f(c)-last) > tol:
+        last=f(c)
+        if sign(last)==sign(f(b)):
+            b=c
+        else:
+            a=c
+        c = (a+b)/2.0
+    return c
+
+def quad(fintegrand,xmin,xmax,n=1e4):
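+    #log-spaced trapezoid rule, so xmin and xmax must both be positive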
+    spacings = na.logspace(na.log10(xmin),na.log10(xmax),n)
+    integrand_arr = fintegrand(spacings)
+    val = na.trapz(integrand_arr,dx=na.diff(spacings))
+    return val
+
+def a2b(at,Om0=0.27,Oml0=0.73,h=0.700):
+    def f_a2b(x):
+        val = 0.5*sqrt(Om0) / x**3.0
+        val /= sqrt(Om0/x**3.0 +Oml0 +(1.0 - Om0-Oml0)/x**2.0)
+        return val
+    #val, err = si.quad(f_a2b,1,at)
+    val = quad(f_a2b,1,at)
+    return val
+
+def b2a(bt,**kwargs):
+    #converts code time into expansion factor 
+    #if Om0 == 1 and OmL == 0 then b2a is (1 / (1-td))**2
+    #if bt < -190.0 or bt > -.10:  raise 'bt outside of range'
+    f_b2a = lambda at: a2b(at,**kwargs)-bt
+    return find_root(f_b2a,1e-4,1.1)
+    #return so.brenth(f_b2a,1e-4,1.1)
+    #return brent.brent(f_b2a)
+
+def a2t(at,Om0=0.27,Oml0=0.73,h=0.700):
+    integrand = lambda x : 1./(x*sqrt(Oml0+Om0*x**-3.0))
+    #current_time,err = si.quad(integrand,0.0,at,epsabs=1e-6,epsrel=1e-6)
+    current_time = quad(integrand,1e-4,at)
+    #spacings = na.logspace(-5,na.log10(at),1e5)
+    #integrand_arr = integrand(spacings)
+    #current_time = na.trapz(integrand_arr,dx=na.diff(spacings))
+    current_time *= 9.779/h
+    return current_time
+
+def b2t(tb,n = 1e2,logger=None,**kwargs):
+    tb = na.array(tb)
+    if type(tb) == type(1.1): 
+        return a2t(b2a(tb))
+    if tb.shape == (): 
+        return a2t(b2a(tb))
+    if len(tb) < n: n= len(tb)
+    age_min = a2t(b2a(tb.max(),**kwargs),**kwargs)
+    age_max = a2t(b2a(tb.min(),**kwargs),**kwargs)
+    tbs  = -1.*na.logspace(na.log10(-tb.min()),
+                          na.log10(-tb.max()),n)
+    ages = []
+    for i,tbi in enumerate(tbs):
+        ages += a2t(b2a(tbi)),
+        if logger: logger(i)
+    ages = na.array(ages)
+    fb2t = na.interp(tb,tbs,ages)
+    #fb2t = interp1d(tbs,ages)
+    return fb2t
+
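+#Usage sketch (not part of the reader): given an array of ART code-time values,
+#b2t interpolates proper ages in Gyr, e.g.
+#    ages_gyr = b2t(na.array([-150.0, -10.0, -0.5]))
+#assuming the default cosmology (Om0=0.27, Oml0=0.73, h=0.7) used above.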


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/frontends/art/setup.py
--- a/yt/frontends/art/setup.py
+++ b/yt/frontends/art/setup.py
@@ -1,13 +1,10 @@
 #!/usr/bin/env python
 import setuptools
-import os
-import sys
-import os.path
+import os, sys, os.path
 
-
-def configuration(parent_package='', top_path=None):
+def configuration(parent_package='',top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('art', parent_package, top_path)
-    config.make_config_py()  # installs __config__.py
+    config = Configuration('art',parent_package,top_path)
+    config.make_config_py() # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -121,8 +121,37 @@
         self.float_type = self._fhandle['/level_0']['data:datatype=0'].dtype.name
         self._levels = self._fhandle.keys()[1:]
         AMRHierarchy.__init__(self,pf,data_style)
+        self._read_particles()
         self._fhandle.close()
 
+    def _read_particles(self):
+        self.particle_filename = self.hierarchy_filename[:-4] + 'sink'
+        if not os.path.exists(self.particle_filename): return
+        with open(self.particle_filename, 'r') as f:
+            lines = f.readlines()
+            self.num_stars = int(lines[0].strip().split(' ')[0])
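+            #each remaining line is one whitespace-separated particle record;
+            #columns 1-3 hold the x, y, z position (column 0 is the mass --
+            #see the field index table in the chombo io handler below)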
+            for line in lines[1:]:
+                particle_position_x = float(line.split(' ')[1])
+                particle_position_y = float(line.split(' ')[2])
+                particle_position_z = float(line.split(' ')[3])
+                coord = [particle_position_x, particle_position_y, particle_position_z]
+                # for each particle, determine which grids contain it
+                # copied from object_finding_mixin.py                                                                                                             
+                mask=na.ones(self.num_grids)
+                for i in xrange(len(coord)):
+                    na.choose(na.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
+                    na.choose(na.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
+                ind = na.where(mask == 1)
+                selected_grids = self.grids[ind]
+                # in orion, particles always live on the finest level.
+                # so, we want to assign the particle to the finest of
+                # the grids we just found
+                if len(selected_grids) != 0:
+                    grid = sorted(selected_grids, key=lambda grid: grid.Level)[-1]
+                    ind = na.where(self.grids == grid)[0][0]
+                    self.grid_particle_count[ind] += 1
+                    self.grids[ind].NumberOfParticles += 1
+
     def _initialize_data_storage(self):
         pass
 


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -33,6 +33,7 @@
     ValidateSpatial, \
     ValidateGridType
 import yt.data_objects.universal_fields
+import numpy as na
 
 KnownChomboFields = FieldInfoContainer()
 add_chombo_field = KnownChomboFields.add_field
@@ -76,12 +77,12 @@
                   units=r"",display_name=r"B_z")
 KnownChomboFields["Z-magnfield"]._projected_units=r""
 
-add_chombo_field("energy-density", function=lambda a,b: None, take_log=True,
+add_chombo_field("energy-density", function=NullFunc, take_log=True,
                  validators = [ValidateDataField("energy-density")],
                  units=r"\rm{erg}/\rm{cm}^3")
 KnownChomboFields["energy-density"]._projected_units =r""
 
-add_chombo_field("radiation-energy-density", function=lambda a,b: None, take_log=True,
+add_chombo_field("radiation-energy-density", function=NullFunc, take_log=True,
                  validators = [ValidateDataField("radiation-energy-density")],
                  units=r"\rm{erg}/\rm{cm}^3")
 KnownChomboFields["radiation-energy-density"]._projected_units =r""
@@ -125,3 +126,36 @@
     return data["Z-momentum"]/data["density"]
 add_field("z-velocity",function=_zVelocity, take_log=False,
           units=r'\rm{cm}/\rm{s}')
+
+def particle_func(p_field, dtype='float64'):
+    def _Particles(field, data):
+        io = data.hierarchy.io
+        if not data.NumberOfParticles > 0:
+            return na.array([], dtype=dtype)
+        else:
+            return io._read_particles(data, p_field).astype(dtype)
+        
+    return _Particles
+
+_particle_field_list = ["mass",
+                        "position_x",
+                        "position_y",
+                        "position_z",
+                        "momentum_x",
+                        "momentum_y",
+                        "momentum_z",
+                        "angmomen_x",
+                        "angmomen_y",
+                        "angmomen_z",
+                        "mlast",
+                        "mdeut",
+                        "n",
+                        "mdot",
+                        "burnstate",
+                        "id"]
+
+for pf in _particle_field_list:
+    pfunc = particle_func("particle_%s" % (pf))
+    add_field("particle_%s" % pf, function=pfunc,
+              validators = [ValidateSpatial(0)],
+              particle_type=True)


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -25,6 +25,7 @@
 """
 import h5py
 import re
+import numpy as na
 
 from yt.utilities.io_handler import \
            BaseIOHandler
@@ -70,3 +71,41 @@
         sl[axis] = slice(coord, coord + 1)
         return self._read_data_set(grid,field)[sl]
 
+    def _read_particles(self, grid, field):
+        """
+        Parses the Orion star particle text files.
+        """
+        index = {'particle_mass': 0,
+                 'particle_position_x': 1,
+                 'particle_position_y': 2,
+                 'particle_position_z': 3,
+                 'particle_momentum_x': 4,
+                 'particle_momentum_y': 5,
+                 'particle_momentum_z': 6,
+                 'particle_angmomen_x': 7,
+                 'particle_angmomen_y': 8,
+                 'particle_angmomen_z': 9,
+                 'particle_mlast': 10,
+                 'particle_mdeut': 11,
+                 'particle_n': 12,
+                 'particle_mdot': 13,
+                 'particle_burnstate': 14,
+                 'particle_id': 15}
+
+        def read(line, field):
+            return float(line.split(' ')[index[field]])
+
+        fn = grid.pf.fullplotdir[:-4] + "sink"
+        with open(fn, 'r') as f:
+            lines = f.readlines()
+            particles = []
+            for line in lines[1:]:
+                if grid.NumberOfParticles > 0:
+                    coord = read(line, "particle_position_x"), \
+                            read(line, "particle_position_y"), \
+                            read(line, "particle_position_z")
+                    if ( (grid.LeftEdge < coord).all() and
+                         (coord <= grid.RightEdge).all() ):
+                        particles.append(read(line, field))
+        return na.array(particles)


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/frontends/enzo/api.py
--- a/yt/frontends/enzo/api.py
+++ b/yt/frontends/enzo/api.py
@@ -38,6 +38,9 @@
       EnzoStaticOutput, \
       EnzoStaticOutputInMemory
 
+from .simulation_handling import \
+    EnzoSimulation
+
 from .fields import \
       EnzoFieldInfo, \
       Enzo2DFieldInfo, \


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -51,7 +51,7 @@
 _speciesList = ["HI", "HII", "Electron",
                 "HeI", "HeII", "HeIII",
                 "H2I", "H2II", "HM",
-                "DI", "DII", "HDI", "Metal", "PreShock"]
+                "DI", "DII", "HDI", "Metal", "MetalSNIa", "PreShock"]
 _speciesMass = {"HI": 1.0, "HII": 1.0, "Electron": 1.0,
                 "HeI": 4.0, "HeII": 4.0, "HeIII": 4.0,
                 "H2I": 2.0, "H2II": 2.0, "HM": 1.0,
@@ -224,7 +224,10 @@
 _default_fields = ["Density","Temperature",
                    "x-velocity","y-velocity","z-velocity",
                    "x-momentum","y-momentum","z-momentum",
-                   "Bx", "By", "Bz", "Dust_Temperature"]
+                   "Bx", "By", "Bz", "Dust_Temperature",
+                   "HI_kph", "HeI_kph", "HeII_kph", "H2I_kdiss", "PhotoGamma",
+                   "RadAccel1", "RadAccel2", "RadAccel3", "SN_Colour",
+                   "Ray_Segments"]
 # else:
 #     _default_fields = ["Density","Temperature","Gas_Energy","Total_Energy",
 #                        "x-velocity","y-velocity","z-velocity"]
@@ -247,11 +250,35 @@
     f._units=r"\mathrm{Gau\ss}"
     f.take_log=False
 
+def _convertkph(data):
+    return data.convert("Time")
+for field in ["HI_kph", "HeI_kph", "HeII_kph", "H2I_kdiss"]:
+    f = KnownEnzoFields[field]
+    f._convert_function = _convertkph
+    f._units=r"\rm{s}^{-1}"
+    f.take_log=True
+
+def _convertRadiationAccel(data):
+    return data.convert("cm") / data.convert("Time")
+for dim in range(1,4):
+    f = KnownEnzoFields["RadAccel%d" % dim]
+    f._convert_function = _convertRadiationAccel
+    f._units=r"\rm{cm}\ \rm{s}^{-2}"
+    f.take_log=False
+def _RadiationAccelerationMagnitude(field, data):
+    return ( data["RadAccel1"]**2 + data["RadAccel2"]**2 +
+             data["RadAccel3"]**2 )**(1.0/2.0)
+add_field("RadiationAcceleration", 
+          function=_RadiationAccelerationMagnitude,
+          validators=ValidateDataField(["RadAccel1", "RadAccel2", "RadAccel3"]),
+          display_name="Radiation\ Acceleration", units=r"\rm{cm} \rm{s}^{-2}")
+
 # Now we override
 
 def _convertDensity(data):
     return data.convert("Density")
-for field in ["Density"] + [ "%s_Density" % sp for sp in _speciesList ]:
+for field in ["Density"] + [ "%s_Density" % sp for sp in _speciesList ] + \
+        ["SN_Colour"]:
     KnownEnzoFields[field]._units = r"\rm{g}/\rm{cm}^3"
     KnownEnzoFields[field]._projected_units = r"\rm{g}/\rm{cm}^2"
     KnownEnzoFields[field]._convert_function=_convertDensity


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/frontends/enzo/simulation_handling.py
--- /dev/null
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -0,0 +1,692 @@
+"""
+EnzoSimulation class and member functions.
+
+Author: Britton Smith <brittonsmith at gmail.com>
+Affiliation: Michigan State University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2008-2012 Britton Smith.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.funcs import *
+
+import numpy as na
+import glob
+import os
+
+from yt.data_objects.time_series import \
+    TimeSeriesData
+from yt.utilities.cosmology import \
+    Cosmology, \
+    EnzoCosmology
+from yt.utilities.exceptions import \
+    YTException
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+    parallel_root_only
+
+from yt.convenience import \
+    load
+
+class EnzoSimulation(TimeSeriesData):
+    r"""Super class for performing the same operation over all data outputs in 
+    a simulation from one redshift to another.
+    """
+    def __init__(self, parameter_filename):
+        r"""Initialize an Enzo Simulation object.
+
+        Upon creation, the parameter file is parsed and the time and redshift
+        are calculated and stored in all_outputs.  A time units dictionary is
+        instantiated to allow for time outputs to be requested with physical
+        time units.  The get_time_series method can be used to generate a
+        TimeSeriesData object.
+
+        parameter_filename : str
+            The simulation parameter file.
+        
+        Examples
+        --------
+        >>> from yt.mods import *
+        >>> es = ES.EnzoSimulation("my_simulation.par")
+        >>> print es.all_outputs
+
+        """
+        self.parameter_filename = parameter_filename
+        self.parameters = {}
+
+        # Set some parameter defaults.
+        self._set_parameter_defaults()
+        # Read the simulation parameter file.
+        self._parse_parameter_file()
+        # Set up time units dictionary.
+        self._set_time_units()
+
+        # Figure out the starting and stopping times and redshift.
+        self._calculate_simulation_bounds()
+        self.print_key_parameters()
+        
+        # Get all possible datasets.
+        self._get_all_outputs()
+
+    def get_time_series(self, time_data=True, redshift_data=True,
+                        initial_time=None, final_time=None, time_units='1',
+                        initial_redshift=None, final_redshift=None,
+                        initial_cycle=None, final_cycle=None,
+                        times=None, redshifts=None, tolerance=None,
+                        find_outputs=False, parallel=True):
+
+        """
+        Instantiate a TimeSeriesData object for a set of outputs.
+
+        If no additional keywords given, a TimeSeriesData object will be
+        created with all potential datasets created by the simulation.
+
+        Outputs can be gathered by specifying a time or redshift range
+        (or combination of time and redshift), with a specific list of
+        times or redshifts, a range of cycle numbers (for cycle based
+        output), or by simply searching all subdirectories within the
+        simulation directory.
+
+        time_data : bool
+            Whether or not to include time outputs when gathering
+            datasets for time series.
+            Default: True.
+        redshift_data : bool
+            Whether or not to include redshift outputs when gathering
+            datasets for time series.
+            Default: True.
+        initial_time : float
+            The earliest time for outputs to be included.  If None,
+            the initial time of the simulation is used.  This can be
+            used in combination with either final_time or
+            final_redshift.
+            Default: None.
+        final_time : float
+            The latest time for outputs to be included.  If None,
+            the final time of the simulation is used.  This can be
+            used in combination with either initial_time or
+            initial_redshift.
+            Default: None.
+        times : array_like
+            A list of times for which outputs will be found.
+            Default: None.
+        time_units : str
+            The time units used for requesting outputs by time.
+            Default: '1' (code units).
+        initial_redshift : float
+            The earliest redshift for outputs to be included.  If None,
+            the initial redshift of the simulation is used.  This can be
+            used in combination with either final_time or
+            final_redshift.
+            Default: None.
+        final_redshift : float
+            The latest redshift for outputs to be included.  If None,
+            the final redshift of the simulation is used.  This can be
+            used in combination with either initial_time or
+            initial_redshift.
+            Default: None.
+        redshifts : array_like
+            A list of redshifts for which outputs will be found.
+            Default: None.
+        initial_cycle : float
+            The earliest cycle for outputs to be included.  If None,
+            the initial cycle of the simulation is used.  This can
+            only be used with final_cycle.
+            Default: None.
+        final_cycle : float
+            The latest cycle for outputs to be included.  If None,
+            the final cycle of the simulation is used.  This can
+            only be used in combination with initial_cycle.
+            Default: None.
+        tolerance : float
+            Used in combination with "times" or "redshifts" keywords,
+            this is the tolerance within which outputs are accepted
+            given the requested times or redshifts.  If None, the
+            nearest output is always taken.
+            Default: None.
+        find_outputs : bool
+            If True, subdirectories within the GlobalDir directory are
+            searched one by one for datasets.  Time and redshift
+            information are gathered by temporarily instantiating each
+            dataset.  This can be used when simulation data was created
+            in a non-standard way, making it difficult to guess the
+            corresponding time and redshift information.
+            Default: False.
+        parallel : bool/int
+            If True, the generated TimeSeriesData will divide the work
+            such that a single processor works on each dataset.  If an
+            integer is supplied, the work will be divided into that
+            number of jobs.
+            Default: True.
+
+        Examples
+        --------
+        >>> es.get_time_series(initial_redshift=10, final_time=13.7,
+                               time_units='Gyr', redshift_data=False)
+
+        >>> es.get_time_series(redshifts=[3, 2, 1, 0])
+
+        >>> es.get_time_series(final_cycle=100000)
+
+        >>> es.get_time_series(find_outputs=True)
+
+        >>> # after calling get_time_series
+        >>> for pf in es.piter():
+        >>>     pc = PlotCollection(pf, 'c')
+        >>>     pc.add_projection('Density', 0)
+        >>>     pc.save()
+
+        """
+
+        if (initial_redshift is not None or \
+            final_redshift is not None) and \
+            not self.cosmological_simulation:
+            mylog.error('An initial or final redshift has been given for a noncosmological simulation.')
+            return
+
+        if find_outputs:
+            my_outputs = self._find_outputs()
+
+        else:
+            if time_data and redshift_data:
+                my_all_outputs = self.all_outputs
+            elif time_data:
+                my_all_outputs = self.all_time_outputs
+            elif redshift_data:
+                my_all_outputs = self.all_redshift_outputs
+            else:
+                mylog.error('Both time_data and redshift_data are False.')
+                return
+
+            if times is not None:
+                my_outputs = self._get_outputs_by_time(times, tolerance=tolerance,
+                                                       outputs=my_all_outputs,
+                                                       time_units=time_units)
+
+            elif redshifts is not None:
+                my_outputs = self._get_outputs_by_redshift(redshifts, tolerance=tolerance,
+                                                           outputs=my_all_outputs)
+
+            elif initial_cycle is not None or final_cycle is not None:
+                if initial_cycle is None:
+                    initial_cycle = 0
+                else:
+                    initial_cycle = max(initial_cycle, 0)
+                if final_cycle is None:
+                    final_cycle = self.parameters['StopCycle']
+                else:
+                    final_cycle = min(final_cycle, self.parameters['StopCycle'])
+                my_outputs = my_all_outputs[int(ceil(float(initial_cycle) /
+                                                     self.parameters['CycleSkipDataDump'])):
+                                            (final_cycle /  self.parameters['CycleSkipDataDump'])+1]
+
+            else:
+                if initial_time is not None:
+                    my_initial_time = initial_time / self.time_units[time_units]
+                elif initial_redshift is not None:
+                    my_initial_time = self.enzo_cosmology.ComputeTimeFromRedshift(initial_redshift) / \
+                        self.enzo_cosmology.TimeUnits
+                else:
+                    my_initial_time = self.initial_time
+
+                if final_time is not None:
+                    my_final_time = final_time / self.time_units[time_units]
+                elif final_redshift is not None:
+                    my_final_time = self.enzo_cosmology.ComputeTimeFromRedshift(final_redshift) / \
+                        self.enzo_cosmology.TimeUnits
+                else:
+                    my_final_time = self.final_time
+                    
+                my_times = na.array(map(lambda a:a['time'], my_all_outputs))
+                my_indices = na.digitize([my_initial_time, my_final_time], my_times)
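+                #digitize gives the slice bounds into the time-sorted outputs;
+                #if the initial time coincides exactly with an output, step the
+                #lower bound back so that output is included in the series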
+                if my_initial_time == my_times[my_indices[0] - 1]: my_indices[0] -= 1
+                my_outputs = my_all_outputs[my_indices[0]:my_indices[1]]
+
+        TimeSeriesData.__init__(self, outputs=[output['filename'] for output in my_outputs],
+                                parallel=parallel)
+        mylog.info("%d outputs loaded into time series." % len(my_outputs))
+
+    @parallel_root_only
+    def print_key_parameters(self):
+        """
+        Print out some key parameters for the simulation.
+        """
+        for a in ["domain_dimensions", "domain_left_edge",
+                  "domain_right_edge", "initial_time", "final_time",
+                  "stop_cycle", "cosmological_simulation"]:
+            if not hasattr(self, a):
+                mylog.error("Missing %s in parameter file definition!", a)
+                continue
+            v = getattr(self, a)
+            mylog.info("Parameters: %-25s = %s", a, v)
+        if hasattr(self, "cosmological_simulation") and \
+           getattr(self, "cosmological_simulation"):
+            for a in ["omega_lambda", "omega_matter",
+                      "hubble_constant", "initial_redshift",
+                      "final_redshift"]:
+                if not hasattr(self, a):
+                    mylog.error("Missing %s in parameter file definition!", a)
+                    continue
+                v = getattr(self, a)
+                mylog.info("Parameters: %-25s = %s", a, v)
+
+    def _parse_parameter_file(self):
+        """
+        Parses the parameter file and establishes the various
+        dictionaries.
+        """
+
+        self.conversion_factors = {}
+        redshift_outputs = []
+
+        # Let's read the file
+        lines = open(self.parameter_filename).readlines()
+        for line in (l.strip() for l in lines):
+            if '#' in line: line = line[0:line.find('#')]
+            if '//' in line: line = line[0:line.find('//')]
+            if len(line) < 2: continue
+            param, vals = (i.strip() for i in line.split("="))
+            # First we try to decipher what type of value it is.
+            vals = vals.split()
+            # Special case approaching.
+            if "(do" in vals: vals = vals[:1]
+            if len(vals) == 0:
+                pcast = str # Assume NULL output
+            else:
+                v = vals[0]
+                # Figure out if it's castable to floating point:
+                try:
+                    float(v)
+                except ValueError:
+                    pcast = str
+                else:
+                    if any("." in v or "e+" in v or "e-" in v for v in vals):
+                        pcast = float
+                    elif v == "inf":
+                        pcast = str
+                    else:
+                        pcast = int
+            # Now we figure out what to do with it.
+            if param.endswith("Units") and not param.startswith("Temperature"):
+                dataType = param[:-5]
+                # This one better be a float.
+                self.conversion_factors[dataType] = float(vals[0])
+            if param.startswith("CosmologyOutputRedshift["):
+                index = param[param.find("[")+1:param.find("]")]
+                redshift_outputs.append({'index':int(index), 'redshift':float(vals[0])})
+            elif len(vals) == 0:
+                vals = ""
+            elif len(vals) == 1:
+                vals = pcast(vals[0])
+            else:
+                vals = na.array([pcast(i) for i in vals if i != "-99999"])
+            self.parameters[param] = vals
+        self.refine_by = self.parameters["RefineBy"]
+        self.dimensionality = self.parameters["TopGridRank"]
+        if self.dimensionality > 1:
+            self.domain_dimensions = self.parameters["TopGridDimensions"]
+            if len(self.domain_dimensions) < 3:
+                tmp = self.domain_dimensions.tolist()
+                tmp.append(1)
+                self.domain_dimensions = na.array(tmp)
+            self.domain_left_edge = na.array(self.parameters["DomainLeftEdge"],
+                                             "float64").copy()
+            self.domain_right_edge = na.array(self.parameters["DomainRightEdge"],
+                                             "float64").copy()
+        else:
+            self.domain_left_edge = na.array(self.parameters["DomainLeftEdge"],
+                                             "float64")
+            self.domain_right_edge = na.array(self.parameters["DomainRightEdge"],
+                                             "float64")
+            self.domain_dimensions = na.array([self.parameters["TopGridDimensions"],1,1])
+
+        if self.parameters["ComovingCoordinates"]:
+            cosmo_attr = {'omega_lambda': 'CosmologyOmegaLambdaNow',
+                          'omega_matter': 'CosmologyOmegaMatterNow',
+                          'hubble_constant': 'CosmologyHubbleConstantNow',
+                          'initial_redshift': 'CosmologyInitialRedshift',
+                          'final_redshift': 'CosmologyFinalRedshift'}
+            self.cosmological_simulation = 1
+            for a, v in cosmo_attr.items():
+                if not v in self.parameters:
+                    raise MissingParameter(self.parameter_filename, v)
+                setattr(self, a, self.parameters[v])
+        else:
+            self.omega_lambda = self.omega_matter = \
+                self.hubble_constant = self.cosmological_simulation = 0.0
+
+        # make list of redshift outputs
+        self.all_redshift_outputs = []
+        if not self.cosmological_simulation: return
+        for output in redshift_outputs:
+            output['filename'] = os.path.join(self.parameters['GlobalDir'],
+                                              "%s%04d" % (self.parameters['RedshiftDumpDir'],
+                                                          output['index']),
+                                              "%s%04d" % (self.parameters['RedshiftDumpName'],
+                                                          output['index']))
+            del output['index']
+        self.all_redshift_outputs = redshift_outputs
+
+    def _calculate_redshift_dump_times(self):
+        "Calculates time from redshift of redshift outputs."
+
+        if not self.cosmological_simulation: return
+        for output in self.all_redshift_outputs:
+            output['time'] = self.enzo_cosmology.ComputeTimeFromRedshift(output['redshift']) / \
+                self.enzo_cosmology.TimeUnits
+
+    def _calculate_time_outputs(self):
+        "Calculate time outputs and their redshifts if cosmological."
+
+        if self.final_time is None or \
+            not 'dtDataDump' in self.parameters or \
+            self.parameters['dtDataDump'] <= 0.0: return []
+
+        self.all_time_outputs = []
+        index = 0
+        current_time = self.initial_time
+        while current_time <= self.final_time + self.parameters['dtDataDump']:
+            filename = os.path.join(self.parameters['GlobalDir'],
+                                    "%s%04d" % (self.parameters['DataDumpDir'], index),
+                                    "%s%04d" % (self.parameters['DataDumpName'], index))
+
+            output = {'index': index, 'filename': filename, 'time': current_time}
+            output['time'] = min(output['time'], self.final_time)
+            if self.cosmological_simulation:
+                output['redshift'] = self.enzo_cosmology.ComputeRedshiftFromTime(
+                    current_time * self.enzo_cosmology.TimeUnits)
+
+            self.all_time_outputs.append(output)
+            if na.abs(self.final_time - current_time) / self.final_time < 1e-4: break
+            current_time += self.parameters['dtDataDump']
+            index += 1
+
+    def _calculate_cycle_outputs(self):
+        "Calculate cycle outputs."
+
+        mylog.warn('Calculating cycle outputs.  Dataset times will be unavailable.')
+
+        if self.stop_cycle is None or \
+            not 'CycleSkipDataDump' in self.parameters or \
+            self.parameters['CycleSkipDataDump'] <= 0.0: return []
+
+        self.all_time_outputs = []
+        index = 0
+        for cycle in range(0, self.stop_cycle+1, self.parameters['CycleSkipDataDump']):
+            filename = os.path.join(self.parameters['GlobalDir'],
+                                    "%s%04d" % (self.parameters['DataDumpDir'], index),
+                                    "%s%04d" % (self.parameters['DataDumpName'], index))
+
+            output = {'index': index, 'filename': filename, 'cycle': cycle}
+            self.all_time_outputs.append(output)
+            index += 1
+
+    def _get_all_outputs(self):
+        "Get all potential datasets and combine into a time-sorted list."
+
+        if self.parameters['dtDataDump'] > 0 and \
+            self.parameters['CycleSkipDataDump'] > 0:
+            raise AmbiguousOutputs(self.parameter_filename)
+
+        # Get all time or cycle outputs.
+        if self.parameters['CycleSkipDataDump'] > 0:
+            self._calculate_cycle_outputs()
+        else:
+            self._calculate_time_outputs()
+
+        # Calculate times for redshift outputs.
+        self._calculate_redshift_dump_times()
+
+        self.all_outputs = self.all_time_outputs + self.all_redshift_outputs
+        if self.parameters['CycleSkipDataDump'] <= 0:
+            self.all_outputs.sort(key=lambda obj:obj['time'])
+
+        mylog.info("Total datasets: %d." % len(self.all_outputs))
+
+    def _calculate_simulation_bounds(self):
+        """
+        Figure out the starting and stopping time and redshift for the simulation.
+        """
+
+        if 'StopCycle' in self.parameters:
+            self.stop_cycle = self.parameters['StopCycle']
+
+        # Convert initial/final redshifts to times.
+        if self.cosmological_simulation:
+            # Instantiate EnzoCosmology object for units and time conversions.
+            self.enzo_cosmology = EnzoCosmology(HubbleConstantNow=
+                                                (100.0 * self.parameters['CosmologyHubbleConstantNow']),
+                                                OmegaMatterNow=self.parameters['CosmologyOmegaMatterNow'],
+                                                OmegaLambdaNow=self.parameters['CosmologyOmegaLambdaNow'],
+                                                InitialRedshift=self.parameters['CosmologyInitialRedshift'])
+            self.initial_time = self.enzo_cosmology.ComputeTimeFromRedshift(self.initial_redshift) / \
+                self.enzo_cosmology.TimeUnits
+            self.final_time = self.enzo_cosmology.ComputeTimeFromRedshift(self.final_redshift) / \
+                self.enzo_cosmology.TimeUnits
+
+        # If not a cosmology simulation, figure out the stopping criteria.
+        else:
+            if 'InitialTime' in self.parameters:
+                self.initial_time = self.parameters['InitialTime']
+            else:
+                self.initial_time = 0.
+
+            if 'StopTime' in self.parameters:
+                self.final_time = self.parameters['StopTime']
+            else:
+                self.final_time = None
+            if not ('StopTime' in self.parameters or
+                    'StopCycle' in self.parameters):
+                raise NoStoppingCondition(self.parameter_filename)
+            if self.final_time is None:
+                mylog.warn('Simulation %s has no stop time set, stopping condition will be based only on cycles.' %
+                           self.parameter_filename)
+
+    def _set_parameter_defaults(self):
+        "Set some default parameters to avoid problems if they are not in the parameter file."
+
+        self.parameters['GlobalDir'] = "."
+        self.parameters['DataDumpName'] = "data"
+        self.parameters['DataDumpDir'] = "DD"
+        self.parameters['RedshiftDumpName'] = "RedshiftOutput"
+        self.parameters['RedshiftDumpDir'] = "RD"
+        self.parameters['ComovingCoordinates'] = 0
+        self.parameters['TopGridRank'] = 3
+        self.parameters['DomainLeftEdge'] = na.zeros(self.parameters['TopGridRank'])
+        self.parameters['DomainRightEdge'] = na.ones(self.parameters['TopGridRank'])
+        self.parameters['RefineBy'] = 2 # technically not the enzo default
+        self.parameters['StopCycle'] = 100000
+        self.parameters['dtDataDump'] = 0.
+        self.parameters['CycleSkipDataDump'] = 0.
+        self.parameters['TimeUnits'] = 1.
+
+    def _set_time_units(self):
+        """
+        Set up a dictionary of time units conversions.
+        """
+
+        self.time_units = {}
+        if self.cosmological_simulation:
+            self.parameters['TimeUnits'] = 2.52e17 / na.sqrt(self.omega_matter) \
+                / self.hubble_constant / (1 + self.initial_redshift)**1.5
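+            # the standard Enzo cosmological time unit, in seconds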
+        self.time_units['1'] = 1.
+        self.time_units['seconds'] = self.parameters['TimeUnits']
+        self.time_units['years'] = self.time_units['seconds'] / (365*3600*24.0)
+        self.time_units['days']  = self.time_units['seconds'] / (3600*24.0)
+        self.time_units['Myr'] = self.time_units['years'] / 1.0e6
+        self.time_units['Gyr']  = self.time_units['years'] / 1.0e9
+
+    def _find_outputs(self):
+        """
+        Search for directories matching the data dump keywords.
+        If found, get dataset times by opening the pf.
+        """
+
+        # look for time outputs.
+        potential_outputs = glob.glob(os.path.join(self.parameters['GlobalDir'],
+                                                   "%s*" % self.parameters['DataDumpDir'])) + \
+                            glob.glob(os.path.join(self.parameters['GlobalDir'],
+                                                   "%s*" % self.parameters['RedshiftDumpDir']))
+        time_outputs = []
+        mylog.info("Checking %d potential time outputs." % 
+                   len(potential_outputs))
+
+        for output in potential_outputs:
+            if self.parameters['DataDumpDir'] in output:
+                dir_key = self.parameters['DataDumpDir']
+                output_key = self.parameters['DataDumpName']
+            else:
+                dir_key = self.parameters['RedshiftDumpDir']
+                output_key = self.parameters['RedshiftDumpName']
+            index = output[output.find(dir_key) + len(dir_key):]
+            filename = os.path.join(self.parameters['GlobalDir'],
+                                    "%s%s" % (dir_key, index),
+                                    "%s%s" % (output_key, index))
+            if os.path.exists(filename):
+                pf = load(filename)
+                if pf is not None:
+                    time_outputs.append({'filename': filename, 'time': pf.current_time})
+                    if pf.cosmological_simulation:
+                        time_outputs[-1]['redshift'] = pf.current_redshift
+                del pf
+        mylog.info("Located %d time outputs." % len(time_outputs))
+        time_outputs.sort(key=lambda obj: obj['time'])
+        return time_outputs
+
+    def _get_outputs_by_key(self, key, values, tolerance=None, outputs=None):
+        r"""Get datasets at or near to given values.
+        
+        Parameters
+        ----------
+        key: str
+            The key by which to retrieve outputs, usually 'time' or
+            'redshift'.
+        values: array_like
+            A list of values, given as floats.
+        tolerance : float
+            If not None, do not return a dataset unless the value is
+            within the tolerance value.  If None, simply return the
+            nearest dataset.
+            Default: None.
+        outputs : list
+            The list of outputs from which to choose.  If None,
+            self.all_outputs is used.
+            Default: None.
+        
+        Examples
+        --------
+        >>> datasets = es._get_outputs_by_key('redshift', [0, 1, 2], tolerance=0.1)
+        
+        """
+
+        values = ensure_list(values)
+        if outputs is None:
+            outputs = self.all_outputs
+        my_outputs = []
+        for value in values:
+            outputs.sort(key=lambda obj:na.fabs(value - obj[key]))
+            if (tolerance is None or na.abs(value - outputs[0][key]) <= tolerance) \
+                    and outputs[0] not in my_outputs:
+                my_outputs.append(outputs[0])
+            else:
+                mylog.error("No dataset added for %s = %f." % (key, value))
+
+        outputs.sort(key=lambda obj: obj['time'])
+        return my_outputs
+
+    def _get_outputs_by_redshift(self, redshifts, tolerance=None, outputs=None):
+        r"""Get datasets at or near to given redshifts.
+        
+        Parameters
+        ----------
+        redshifts: array_like
+            A list of redshifts, given as floats.
+        tolerance : float
+            If not None, do not return a dataset unless the value is
+            within the tolerance value.  If None, simply return the
+            nearest dataset.
+            Default: None.
+        outputs : list
+            The list of outputs from which to choose.  If None,
+            self.all_outputs is used.
+            Default: None.
+        
+        Examples
+        --------
+        >>> datasets = es._get_outputs_by_redshift([0, 1, 2], tolerance=0.1)
+        
+        """
+
+        return self._get_outputs_by_key('redshift', redshifts, tolerance=tolerance,
+                                     outputs=outputs)
+
+    def _get_outputs_by_time(self, times, tolerance=None, outputs=None,
+                             time_units='1'):
+        r"""Get datasets at or near to given times.
+        
+        Parameters
+        ----------
+        times: array_like
+            A list of times, given in code units as floats.
+        tolerance : float
+            If not None, do not return a dataset unless the time is
+            within the tolerance value.  If None, simply return the
+            nearest dataset.
+            Default = None.
+        outputs : list
+            The list of outputs from which to choose.  If None,
+            self.all_outputs is used.
+            Default: None.
+        time_units : str
+            The units of the list of times.
+            Default: '1' (code units).
+        
+        Examples
+        --------
+        >>> datasets = es._get_outputs_by_time([600, 500, 400], tolerance=10.)
+        
+        """
+
+        times = na.array(times) / self.time_units[time_units]
+        return self._get_outputs_by_key('time', times, tolerance=tolerance,
+                                        outputs=outputs)
+
+class MissingParameter(YTException):
+    def __init__(self, pf, parameter):
+        YTException.__init__(self, pf)
+        self.parameter = parameter
+
+    def __str__(self):
+        return "Parameter file %s is missing %s parameter." % \
+            (self.pf, self.parameter)
+
+class NoStoppingCondition(YTException):
+    def __init__(self, pf):
+        YTException.__init__(self, pf)
+
+    def __str__(self):
+        return "Simulation %s has no stopping condition.  StopTime or StopCycle should be set." % \
+            self.pf
+
+class AmbiguousOutputs(YTException):
+    def __init__(self, pf):
+        YTException.__init__(self, pf)
+
+    def __str__(self):
+        return "Simulation %s has both dtDataDump and CycleSkipDataDump set.  Unable to calculate datasets." % \
+            self.pf
+


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/frontends/orion/data_structures.py
--- a/yt/frontends/orion/data_structures.py
+++ b/yt/frontends/orion/data_structures.py
@@ -163,16 +163,16 @@
                 for i in xrange(len(coord)):
                     na.choose(na.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
                     na.choose(na.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
-                    ind = na.where(mask == 1)
-                    selected_grids = self.grids[ind]
-                    # in orion, particles always live on the finest level.
-                    # so, we want to assign the particle to the finest of
-                    # the grids we just found
-                    if len(selected_grids) != 0:
-                        grid = sorted(selected_grids, key=lambda grid: grid.Level)[-1]
-                        ind = na.where(self.grids == grid)[0][0]
-                        self.grid_particle_count[ind] += 1
-                        self.grids[ind].NumberOfParticles += 1
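+                # (moved out of the per-axis loop above: each particle should be
+                # assigned to a grid once, not once per coordinate axis)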
+                ind = na.where(mask == 1)
+                selected_grids = self.grids[ind]
+                # in orion, particles always live on the finest level.
+                # so, we want to assign the particle to the finest of
+                # the grids we just found
+                if len(selected_grids) != 0:
+                    grid = sorted(selected_grids, key=lambda grid: grid.Level)[-1]
+                    ind = na.where(self.grids == grid)[0][0]
+                    self.grid_particle_count[ind] += 1
+                    self.grids[ind].NumberOfParticles += 1
         return True
                 
     def readGlobalHeader(self,filename,paranoid_read):


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/frontends/ramses/_ramses_reader.pyx
--- a/yt/frontends/ramses/_ramses_reader.pyx
+++ b/yt/frontends/ramses/_ramses_reader.pyx
@@ -829,7 +829,7 @@
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
-    cdef void find_split(self, int *tr):
+    cdef void find_split(self, int *tr,):
         # First look for zeros
         cdef int i, center, ax
         cdef np.ndarray[ndim=1, dtype=np.int64_t] axes
@@ -837,9 +837,9 @@
         axes = np.argsort(self.dd)[::-1]
         cdef np.int64_t *sig
         for axi in range(3):
-            ax = axes[axi]
-            center = self.dimensions[ax] / 2
-            sig = self.sigs[ax]
+            ax = axes[axi] #iterate over domain dimensions, largest first
+            center = self.dimensions[ax] / 2
+            sig = self.sigs[ax] #signature along this axis: one count per cell slice
             for i in range(self.dimensions[ax]):
                 if sig[i] == 0 and i > 0 and i < self.dimensions[ax] - 1:
                     #print "zero: %s (%s)" % (i, self.dimensions[ax])
@@ -871,6 +871,61 @@
         tr[0] = 1; tr[1] = ax; tr[2] = zcp
         return
 
+    @cython.wraparound(False)
+    cdef void find_split_center(self, int *tr,):
+        # First look for zeros
+        cdef int i, center, ax
+        cdef int flip
+        cdef np.ndarray[ndim=1, dtype=np.int64_t] axes
+        cdef np.int64_t strength, zcstrength, zcp
+        axes = np.argsort(self.dd)[::-1]
+        cdef np.int64_t *sig
+        for axi in range(3):
+            ax = axes[axi] #iterate over domain dimensions, largest first
+            center = self.dimensions[ax] / 2
+            sig = self.sigs[ax] #signature along this axis: one count per cell slice
+            #we frequently get stuck on long runs of zeros near the edge of the grid,
+            #so start from the middle and work outward
+            for j in range(self.dimensions[ax]/2):
+                flip = 1
+                i = self.dimensions[ax]/2+j
+                if sig[i] == 0 and i > 0 and i < self.dimensions[ax] - 1:
+                    #print "zero: %s (%s)" % (i, self.dimensions[ax])
+                    tr[0] = 0; tr[1] = ax; tr[2] = i
+                    return
+                i = self.dimensions[ax]/2-j
+                if sig[i] == 0 and i > 0 and i < self.dimensions[ax] - 1:
+                    #print "zero: %s (%s)" % (i, self.dimensions[ax])
+                    tr[0] = 0; tr[1] = ax; tr[2] = i
+                    return
+                    
+                
+        zcstrength = 0
+        zcp = 0
+        zca = -1
+        cdef int temp
+        cdef np.int64_t *sig2d
+        for axi in range(3):
+            ax = axes[axi]
+            sig = self.sigs[ax]
+            sig2d = <np.int64_t *> malloc(sizeof(np.int64_t) * self.dimensions[ax])
+            sig2d[0] = sig2d[self.dimensions[ax]-1] = 0
+            for i in range(1, self.dimensions[ax] - 1):
+                sig2d[i] = sig[i-1] - 2*sig[i] + sig[i+1]
+            for i in range(1, self.dimensions[ax] - 1):
+                if sig2d[i] * sig2d[i+1] <= 0:
+                    strength = labs(sig2d[i] - sig2d[i+1])
+                    if (strength > zcstrength) or \
+                       (strength == zcstrength and (abs(center - i) <
+                                                    abs(center - zcp))):
+                        zcstrength = strength
+                        zcp = i
+                        zca = ax
+            free(sig2d)
+        #print "zcp: %s (%s)" % (zcp, self.dimensions[ax])
+        tr[0] = 1; tr[1] = ax; tr[2] = zcp
+        return
+
     @cython.boundscheck(False)
     @cython.wraparound(False)
     def get_properties(self):
@@ -970,21 +1025,29 @@
         hilbert_indices[o] = h
     return hilbert_indices
 
+
 @cython.boundscheck(False)
 @cython.wraparound(False)
-def get_array_indices_lists(np.ndarray[np.int64_t, ndim=1] ind,
+def get_array_indices_lists(np.ndarray[np.int64_t, ndim=1] ind, 
                             np.ndarray[np.int64_t, ndim=1] uind,
                             np.ndarray[np.int64_t, ndim=2] lefts,
                             np.ndarray[np.int64_t, ndim=2] files):
+    #ind are the hilbert indices 
+    #uind are the unique hilbert indices                        
+    #count[n] tracks how many times the nth index of uind occurs in ind
+    
     cdef np.ndarray[np.int64_t, ndim=1] count = np.zeros(uind.shape[0], 'int64')
     cdef int n, i
     cdef np.int64_t mi, mui
+    
+    #fill in the count array
     for i in range(ind.shape[0]):
         mi = ind[i]
         for n in range(uind.shape[0]):
             if uind[n] == mi:
                 count[n] += 1
                 break
+    
     cdef np.int64_t **alefts
     cdef np.int64_t **afiles
     afiles = <np.int64_t **> malloc(sizeof(np.int64_t *) * uind.shape[0])
@@ -994,6 +1057,9 @@
     cdef np.ndarray[np.int64_t, ndim=2] left
     all_locations = []
     all_lefts = []
+    
+    #having measured the repetition of each hilbert index,
+    #we can now declare how much memory we will use
     for n in range(uind.shape[0]):
         locations = np.zeros((count[n], 6), 'int64')
         left = np.zeros((count[n], 3), 'int64')
@@ -1002,7 +1068,11 @@
         afiles[n] = <np.int64_t *> locations.data
         alefts[n] = <np.int64_t *> left.data
         li[n] = 0
+    
     cdef int fi
+    #now fill all_locations and all_lefts sequentially,
+    #so that when they return to python each unique index
+    #ends up with its own array of locations and left edges
     for i in range(ind.shape[0]):
         mi = ind[i]
         for n in range(uind.shape[0]):
@@ -1022,19 +1092,31 @@
         np.ndarray[np.int64_t, ndim=1] ind,
         np.ndarray[np.int64_t, ndim=2] left_index,
         np.ndarray[np.int64_t, ndim=2] fl,
-        int num_deep = 0):
-    cdef float min_eff = 0.1
+        int num_deep = 0,
+        float min_eff = 0.1,
+        int use_center=0,
+        long split_on_vol = 0):
     cdef ProtoSubgrid L, R
     cdef np.ndarray[np.int64_t, ndim=1] dims_l, li_l
     cdef np.ndarray[np.int64_t, ndim=1] dims_r, li_r
     cdef int tt, ax, fp, i, j, k, gi
     cdef int tr[3]
-    if num_deep > 40:
+    cdef long volume  =0
+    cdef int max_depth = 40
+    volume = dims[0]*dims[1]*dims[2]
+    if split_on_vol>0:
+        if volume < split_on_vol:
+            return [psg]
+    if num_deep > max_depth:
         psg.efficiency = min_eff
         return [psg]
-    if psg.efficiency > min_eff or psg.efficiency < 0.0:
+    if (psg.efficiency > min_eff or psg.efficiency < 0.0):
         return [psg]
-    psg.find_split(tr)
+    if not use_center:    
+        psg.find_split(tr) #default
+    else:
+        psg.find_split_center(tr)    
+        
     tt = tr[0]
     ax = tr[1]
     fp = tr[2]
@@ -1059,7 +1141,7 @@
     if L.efficiency <= 0.0: rv_l = []
     elif L.efficiency < min_eff:
         rv_l = recursive_patch_splitting(L, dims_l, li_l,
-                left_index, fl, num_deep + 1)
+                left_index, fl, num_deep + 1, min_eff,use_center,split_on_vol)
     else:
         rv_l = [L]
     R = ProtoSubgrid(li_r, dims_r, left_index, fl)
@@ -1067,7 +1149,7 @@
     if R.efficiency <= 0.0: rv_r = []
     elif R.efficiency < min_eff:
         rv_r = recursive_patch_splitting(R, dims_r, li_r,
-                left_index, fl, num_deep + 1)
+                left_index, fl, num_deep + 1, min_eff,use_center,split_on_vol)
     else:
         rv_r = [R]
     return rv_r + rv_l
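
The new find_split_center above looks for an interior zero of the signature array starting at the middle of the dimension and working outward, instead of scanning from the edge. A minimal pure-Python sketch of that search (the toy signature values are invented):

    def first_interior_zero_from_center(sig):
        n = len(sig)
        for j in range(n // 2):
            # check center+j before center-j, as find_split_center does
            for i in (n // 2 + j, n // 2 - j):
                if 0 < i < n - 1 and sig[i] == 0:
                    return i
        return None

    print(first_interior_zero_from_center([3, 2, 0, 0, 1, 0, 4, 5]))  # -> 5

Callers opt in through the new recursive_patch_splitting keywords; min_eff=0.1, use_center=0 and split_on_vol=0 reproduce the previous behaviour.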


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -125,6 +125,23 @@
     def _detect_fields(self):
         self.field_list = self.tree_proxy.field_names[:]
     
+    def _setup_field_list(self):
+        if self.parameter_file.use_particles:
+            # We know which particle fields will exist -- pending further
+            # changes in the future.
+            for field in art_particle_field_names:
+                def external_wrapper(f):
+                    def _convert_function(data):
+                        return data.convert(f)
+                    return _convert_function
+                cf = external_wrapper(field)
+                # Note that we call add_field on the field_info directly.  This
+                # will allow the same field detection mechanism to work for 1D,
+                # 2D and 3D fields.
+                self.pf.field_info.add_field(field, NullFunc,
+                                             convert_function=cf,
+                                             take_log=False, particle_type=True)
+
     def _setup_classes(self):
         dd = self._get_data_reader_dict()
         AMRHierarchy._setup_classes(self, dd)


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -94,22 +94,6 @@
 except ImportError:
     pass
 
-def __memory_fallback(pid):
-    """
-    Get process memory from a system call.
-    """
-    value = os.popen('ps -o rss= -p %d' % pid).read().strip().split('\n')
-    if len(value) == 1: return float(value[0])
-    value.pop(0)
-    for line in value:
-        online = line.split()
-        if online[0] != pid: continue
-        try:
-            return float(online[2])
-        except:
-            return 0.0
-    return 0.0
-
 def get_memory_usage():
     """
     Returning resident size in megabytes
@@ -118,10 +102,10 @@
     try:
         pagesize = resource.getpagesize()
     except NameError:
-        return __memory_fallback(pid) / 1024
+        return -1024
     status_file = "/proc/%s/statm" % (pid)
     if not os.path.isfile(status_file):
-        return __memory_fallback(pid) / 1024
+        return -1024
     line = open(status_file).read()
     size, resident, share, text, library, data, dt = [int(i) for i in line.split()]
     return resident * pagesize / (1024 * 1024) # return in megs
@@ -568,10 +552,11 @@
 def parallel_profile(prefix):
     import cProfile
     from yt.config import ytcfg
-    fn = "%s_%04i.cprof" % (prefix,
+    fn = "%s_%04i_%04i.cprof" % (prefix,
+                ytcfg.getint("yt", "__topcomm_parallel_size"),
                 ytcfg.getint("yt", "__topcomm_parallel_rank"))
     p = cProfile.Profile()
     p.enable()
-    yield
+    yield fn
     p.disable()
     p.dump_stats(fn)
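
A hedged usage sketch for the reworked parallel_profile: it assumes the function is wrapped as a context manager (its decorator sits outside this hunk), and do_work() is a hypothetical workload. The yielded value is the per-rank profile filename built above.

    from yt.funcs import parallel_profile

    with parallel_profile("projection") as fn:
        do_work()
    # writes e.g. projection_0004_0000.cprof, i.e.
    # <prefix>_<topcomm size>_<topcomm rank>.cprof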


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/gui/reason/extdirect_repl.py
--- a/yt/gui/reason/extdirect_repl.py
+++ b/yt/gui/reason/extdirect_repl.py
@@ -26,6 +26,7 @@
 
 import json
 import os
+import stat
 import cStringIO
 import logging
 import uuid
@@ -276,7 +277,7 @@
         for i in range(30):
             # Check for stop
             if self.stopped: return {'type':'shutdown'} # No race condition
-            if self.payload_handler.event.wait(1): # One second timeout
+            if self.payload_handler.event.wait(0.01): # Ten millisecond timeout
                 return self.payload_handler.deliver_payloads()
         if self.debug: print "### Heartbeat ... finished: %s" % (time.ctime())
         return []
@@ -459,6 +460,36 @@
         return command
 
     @lockit
+    def load(self, base_dir, filename):
+        pp = os.path.join(base_dir, filename)
+        funccall = "pfs.append(load('%s'))" % pp
+        self.execute(funccall)
+        return []
+
+    def file_listing(self, base_dir, sub_dir):
+        if base_dir == "":
+            cur_dir = os.getcwd()
+        elif sub_dir == "":
+            cur_dir = base_dir
+        else:
+            cur_dir = os.path.join(base_dir, sub_dir)
+            cur_dir = os.path.abspath(cur_dir)
+        if not os.path.isdir(cur_dir):
+            return {'change':False}
+        fns = os.listdir(cur_dir)
+        results = [("..", 0, "directory")]
+        for fn in sorted((os.path.join(cur_dir, f) for f in fns)):
+            if not os.access(fn, os.R_OK): continue
+            if os.path.isfile(fn):
+                size = os.path.getsize(fn)
+                t = "file"
+            else:
+                size = 0
+                t = "directory"
+            results.append((os.path.basename(fn), size, t))
+        return dict(objs = results, cur_dir=cur_dir)
+
+    @lockit
     def create_phase(self, objname, field_x, field_y, field_z, weight):
         if weight == "None": weight = None
         else: weight = "'%s'" % (weight)
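
A hedged sketch of the payload the new file_listing endpoint hands to the file dialog: a list of (name, size, type) tuples plus the resolved directory. Here repl stands for an ExtDirectREPL instance and the directory contents in the comment are invented.

    listing = repl.file_listing(base_dir="", sub_dir="")
    # listing == {'objs': [('..', 0, 'directory'),
    #                      ('RD0005', 0, 'directory'),
    #                      ('RD0005.hierarchy', 40960, 'file')],
    #             'cur_dir': '/home/user/data'}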


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/gui/reason/html/images/file_dialog_directory.png
Binary file yt/gui/reason/html/images/file_dialog_directory.png has changed


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/gui/reason/html/images/file_dialog_file.png
Binary file yt/gui/reason/html/images/file_dialog_file.png has changed


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/gui/reason/html/index.html
--- a/yt/gui/reason/html/index.html
+++ b/yt/gui/reason/html/index.html
@@ -78,7 +78,8 @@
     <!-- FONTS --><!-- These will get pulled from Google, but Google might not be accessible.
          In that case, it will default to whatever is in the family. -->
-    <link rel="stylesheet" type="text/css" href="http://fonts.googleapis.com/css?family=Inconsolata">
+    <!--<link rel="stylesheet" type="text/css"
+    href="http://fonts.googleapis.com/css?family=Inconsolata">--><!-- LEAFLET STUFF --><script type="text/javascript" src="leaflet/leaflet.js"></script>
@@ -103,6 +104,9 @@
     <script type="text/javascript" src="js/menu_items.js"></script><!-- THE PLOT WINDOW FUNCTIONS -->
+    <script type="text/javascript" src="js/file_open.js"></script>
+
+    <!-- THE PLOT WINDOW FUNCTIONS --><script type="text/javascript" src="js/widget_plotwindow.js"></script><!-- THE GRID VIEWER FUNCTIONS -->


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/gui/reason/html/js/file_open.js
--- /dev/null
+++ b/yt/gui/reason/html/js/file_open.js
@@ -0,0 +1,146 @@
+/**********************************************************************
+A file opener
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+***********************************************************************/
+
+function open_file() {
+    var filestore = new Ext.data.ArrayStore({
+      fields: ['filename', 
+               {name:'size', type:'float'},
+               'type'
+      ]
+    });
+    var cur_dir;
+    function fillStore(f, a){
+        if(a.status == false){
+          Ext.Msg.alert("Error", "Something has gone wrong.");
+          return;
+        }
+        if(a.result['change'] == false) {
+          win.get("current_file").setValue(cur_dir);
+          return;
+        }
+        filestore.removeAll();
+        var rec = [];
+        filestore.loadData(a.result['objs']);
+        cur_dir = a.result['cur_dir'];
+        win.get("current_file").setValue(cur_dir);
+    }
+
+    var win = new Ext.Window({
+        layout:'vbox',
+        layoutConfig: {
+            align: 'stretch',
+            pack: 'start',
+            defaultMargins: "5px 5px 5px 5px",
+        },
+        width:540,
+        height:480,
+        modal:true,
+        resizable:true,
+        draggable:true,
+        title:'Open File',
+        items: [
+            { xtype: 'textfield',
+              id: 'current_file',
+              listeners: {
+                specialkey: function(f, e) {
+                  if (e.getKey() != e.ENTER) { return; }
+                  yt_rpc.ExtDirectREPL.file_listing(
+                        {base_dir:f.getValue(), sub_dir:''}, fillStore);
+                }
+              }
+            }, {
+              xtype:'listview',
+              id: 'file_listing',
+              store: filestore ,
+              singleSelect:true,
+              emptyText: 'No images to display',
+              flex: 1.0,
+              columns: [
+              {
+                  header: 'Type',
+                  width: 0.1,
+                  tpl: '<img src="images/file_dialog_{type}.png" width=16 height=16>',
+                  dataIndex: 'type'
+              },{
+                  header: 'Filename',
+                  width: .75,
+                  dataIndex: 'filename'
+              },{
+                  header: 'Size',
+                  dataIndex: 'size',
+                  tpl: '{size:fileSize}',
+                  align: 'right',
+                  cls: 'listview-filesize'
+              }],
+              listeners: {
+                dblclick: function(view, index, node, e) {
+                    var fileRecord = filestore.getAt(index).data;
+                    if (fileRecord.type == 'directory') {
+                      yt_rpc.ExtDirectREPL.file_listing(
+                            {base_dir:cur_dir, sub_dir:fileRecord.filename},
+                            fillStore);
+                    } else {
+                      yt_rpc.ExtDirectREPL.load(
+                            {base_dir:cur_dir, filename:fileRecord.filename},
+                            handle_result);
+                      win.destroy();
+                    }
+                },
+                selectionchange: function(view, index, node, e) {
+                },
+              },
+            }, {
+              xtype: 'panel',
+              height: 40,
+              layout: 'hbox',
+              layoutConfig: {
+                  align: 'stretch',
+                  pack: 'start',
+                  defaultMargins: "5px 5px 5px 5px",
+              },
+              items: [
+                { flex: 1.0, xtype: 'button', text: 'Cancel',
+                    handler: function(b, e) { win.destroy(); } },
+                { flex: 1.0, xtype: 'button', text: 'Load',
+                    handler: function(b, e) {
+                      filename = "";
+                      var fl = win.get("file_listing");
+                      if (fl.getSelectionCount() == 1) {
+                        filename = fl.getSelectedRecords()[0].data.filename;
+                      }
+                      yt_rpc.ExtDirectREPL.load(
+                            {base_dir:cur_dir, filename:filename},
+                            handle_result);
+                      win.destroy();
+                    }
+                },
+              ],
+            },
+        ],
+    });
+    yt_rpc.ExtDirectREPL.file_listing(
+          {base_dir:"", sub_dir:""}, fillStore);
+    win.show(this);
+}


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/gui/reason/html/js/menu_items.js
--- a/yt/gui/reason/html/js/menu_items.js
+++ b/yt/gui/reason/html/js/menu_items.js
@@ -33,7 +33,11 @@
     text: 'Menu',
     id: 'main_menu',
     menu: [
-           {xtype:'menuitem', text: 'Open', disabled: true},
+           {xtype:'menuitem', text: 'Open File', 
+               handler: function(b,e) {
+                  open_file()
+               },
+           },
            {xtype:'menuitem', text: 'Open Directory', disabled: true},
            {xtype: 'menuseparator'},
            {xtype:'menuitem', text: 'Save Script',


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -67,7 +67,8 @@
     add_quantity, quantity_info
 
 from yt.frontends.enzo.api import \
-    EnzoStaticOutput, EnzoStaticOutputInMemory, EnzoFieldInfo, \
+    EnzoStaticOutput, EnzoStaticOutputInMemory, \
+    EnzoSimulation, EnzoFieldInfo, \
     add_enzo_field, add_enzo_1d_field, add_enzo_2d_field
 
 from yt.frontends.castro.api import \
@@ -128,7 +129,7 @@
 for name, cls in callback_registry.items():
     exec("%s = cls" % name)
 
-from yt.convenience import all_pfs, max_spheres, load, projload
+from yt.convenience import load, projload
 
 # Import some helpful math utilities
 from yt.utilities.math_utils import \


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/setup.py
--- a/yt/setup.py
+++ b/yt/setup.py
@@ -8,7 +8,6 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('yt', parent_package, top_path)
     config.add_subpackage('analysis_modules')
-    config.add_subpackage('astro_objects')
     config.add_subpackage('data_objects')
     config.add_subpackage('frontends')
     config.add_subpackage('gui')


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/startup_tasks.py
--- a/yt/startup_tasks.py
+++ b/yt/startup_tasks.py
@@ -124,6 +124,9 @@
     #sys.argv = [a for a in unparsed_args]
     if opts.parallel:
         parallel_capable = turn_on_parallelism()
+    subparsers = parser.add_subparsers(title="subcommands",
+                        dest='subcommands',
+                        description="Valid subcommands",)
 else:
     subparsers = parser.add_subparsers(title="subcommands",
                         dest='subcommands',


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/utilities/_amr_utils/CICDeposit.pyx
--- a/yt/utilities/_amr_utils/CICDeposit.pyx
+++ b/yt/utilities/_amr_utils/CICDeposit.pyx
@@ -1,8 +1,10 @@
 """
-Simle integrators for the radiative transfer equation
+Simple integrators for the radiative transfer equation
 
 Author: Britton Smith <brittonsmith at gmail.com>
 Affiliation: CASA/University of Colorado
+Author: Christopher Moody <juxtaposicion at gmail.com>
+Affiliation: cemoody at ucsc.edu
 Homepage: http://yt-project.org/
 License:
   Copyright (C) 2008 Matthew Turk.  All Rights Reserved.
@@ -111,3 +113,73 @@
         ind[2] = <int> ((pos_z[i] - left_edge[2]) * idds[2])
         sample[i] = arr[ind[0], ind[1], ind[2]]
     return sample
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def assign_particles_to_cells(np.ndarray[np.int32_t, ndim=1] levels, #for cells
+                              np.ndarray[np.float32_t, ndim=2] left_edges, #many cells
+                              np.ndarray[np.float32_t, ndim=2] right_edges,
+                              np.ndarray[np.float32_t, ndim=1] pos_x, #particle
+                              np.ndarray[np.float32_t, ndim=1] pos_y,
+                              np.ndarray[np.float32_t, ndim=1] pos_z):
+    #for every cell, assign the particles belonging to it,
+    #skipping previously assigned particles
+    cdef long level_max = np.max(levels)
+    cdef long i,j,level
+    cdef long npart = pos_x.shape[0]
+    cdef long ncells = left_edges.shape[0] 
+    cdef np.ndarray[np.int32_t, ndim=1] assign = np.zeros(npart,dtype='int32')-1
+    for level in range(level_max,0,-1):
+        #start with the finest level
+        for i in range(ncells):
+            #go through every cell on the finest level first
+            if not levels[i] == level: continue
+            for j in range(npart):
+                #iterate over all particles, skip if assigned
+                if assign[j]>-1: continue
+                if (left_edges[i,0] <= pos_x[j] <= right_edges[i,0]):
+                    if (left_edges[i,1] <= pos_y[j] <= right_edges[i,1]):
+                        if (left_edges[i,2] <= pos_z[j] <= right_edges[i,2]):
+                            assign[j]=i
+    return assign
+
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def assign_particles_to_cell_lists(np.ndarray[np.int32_t, ndim=1] levels, #for cells
+                              np.int64_t level_max, 
+                              np.ndarray[np.float32_t, ndim=2] left_edges, #many cells
+                              np.ndarray[np.float32_t, ndim=2] right_edges,
+                              np.ndarray[np.float32_t, ndim=1] pos_x, #particle
+                              np.ndarray[np.float32_t, ndim=1] pos_y,
+                              np.ndarray[np.float32_t, ndim=1] pos_z):
+    #for every cell, assign the particles belonging to it,
+    #skipping previously assigned particles
+    #Todo: instead of iterating over every particle, could use a kdtree
+    cdef long i,j,level
+    cdef long npart = pos_x.shape[0]
+    cdef long ncells = left_edges.shape[0] 
+    cdef np.ndarray[np.int32_t, ndim=1] assign = np.zeros(npart,dtype='int32')-1
+    index_lists = []
+    for level in range(level_max,0,-1):
+        #start with the finest level
+        for i in range(ncells):
+            #go through every cell on the finest level first
+            if not levels[i] == level: continue
+            index_list = []
+            for j in range(npart):
+                #iterate over all particles, skip if assigned
+                if assign[j]>-1: continue
+                if (left_edges[i,0] <= pos_x[j] <= right_edges[i,0]):
+                    if (left_edges[i,1] <= pos_y[j] <= right_edges[i,1]):
+                        if (left_edges[i,2] <= pos_z[j] <= right_edges[i,2]):
+                            assign[j]=i
+                            index_list += j,
+            index_lists += index_list,
+    return assign,index_lists
+
+    
+    
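
A hedged call sketch for assign_particles_to_cells; the compiled module path is an assumption and the toy arrays are invented. Note that the loop runs from the finest level down to level 1, so a particle contained only by a level-0 cell keeps the sentinel value -1.

    import numpy as np
    from yt.utilities.amr_utils import assign_particles_to_cells  # path assumed

    levels      = np.array([0, 1], dtype='int32')
    left_edges  = np.array([[0.0, 0.0, 0.0], [0.25, 0.25, 0.25]], dtype='float32')
    right_edges = np.array([[1.0, 1.0, 1.0], [0.75, 0.75, 0.75]], dtype='float32')
    pos_x = np.array([0.5, 0.9], dtype='float32')
    pos_y = np.array([0.5, 0.9], dtype='float32')
    pos_z = np.array([0.5, 0.9], dtype='float32')

    assign = assign_particles_to_cells(levels, left_edges, right_edges,
                                       pos_x, pos_y, pos_z)
    # assign -> array([ 1, -1], dtype=int32)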


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/utilities/_amr_utils/DepthFirstOctree.pyx
--- a/yt/utilities/_amr_utils/DepthFirstOctree.pyx
+++ b/yt/utilities/_amr_utils/DepthFirstOctree.pyx
@@ -27,21 +27,6 @@
 cimport numpy as np
 cimport cython
 
-cdef extern from "math.h":
-    double exp(double x)
-    float expf(float x)
-    long double expl(long double x)
-    double floor(double x)
-    double ceil(double x)
-    double fmod(double x, double y)
-    double log2(double x)
-    long int lrint(double x)
-    double fabs(double x)
-    double cos(double x)
-    double sin(double x)
-    double asin(double x)
-    double acos(double x)
-
 cdef class position:
     cdef public int output_pos, refined_pos
     def __cinit__(self):
@@ -81,6 +66,7 @@
                             np.ndarray[np.float64_t, ndim=2] output,
                             np.ndarray[np.int32_t, ndim=1] refined,
                             OctreeGridList grids):
+    #cdef int s = curpos
     cdef int i, i_off, j, j_off, k, k_off, ci, fi
     cdef int child_i, child_j, child_k
     cdef OctreeGrid child_grid
@@ -93,8 +79,13 @@
     cdef np.float64_t child_dx
     cdef np.ndarray[np.float64_t, ndim=1] child_leftedges
     cdef np.float64_t cx, cy, cz
+    #here we go over the 8 octants
+    #in general however, a mesh cell on this level
+    #may have more than 8 children on the next level
+    #so we find the floating-point center (cx, cy, cz) of each child cell
+    # and from that find the child cell indices
     for i_off in range(i_f):
-        i = i_off + i_i
+        i = i_off + i_i #index
         cx = (leftedges[0] + i*dx)
         for j_off in range(j_f):
             j = j_off + j_i
@@ -118,19 +109,20 @@
                     child_i = int((cx - child_leftedges[0])/child_dx)
                     child_j = int((cy - child_leftedges[1])/child_dx)
                     child_k = int((cz - child_leftedges[2])/child_dx)
-                    s = RecurseOctreeDepthFirst(child_i, child_j, child_k, 2, 2, 2,
+                    # s = Recurs.....
+                    RecurseOctreeDepthFirst(child_i, child_j, child_k, 2, 2, 2,
                                         curpos, ci - grid.offset, output, refined, grids)
-    return s
 
+ at cython.boundscheck(False)
 def RecurseOctreeByLevels(int i_i, int j_i, int k_i,
                           int i_f, int j_f, int k_f,
-                          np.ndarray[np.int64_t, ndim=1] curpos,
+                          np.ndarray[np.int32_t, ndim=1] curpos,
                           int gi, 
                           np.ndarray[np.float64_t, ndim=2] output,
-                          np.ndarray[np.int64_t, ndim=2] genealogy,
+                          np.ndarray[np.int32_t, ndim=2] genealogy,
                           np.ndarray[np.float64_t, ndim=2] corners,
                           OctreeGridList grids):
-    cdef np.int64_t i, i_off, j, j_off, k, k_off, ci, fi
+    cdef np.int32_t i, i_off, j, j_off, k, k_off, ci, fi
     cdef int child_i, child_j, child_k
     cdef OctreeGrid child_grid
     cdef OctreeGrid grid = grids[gi-1]
@@ -143,11 +135,11 @@
     cdef np.float64_t child_dx
     cdef np.ndarray[np.float64_t, ndim=1] child_leftedges
     cdef np.float64_t cx, cy, cz
-    cdef np.int64_t cp
-    cdef int s = 0
+    cdef int cp
     for i_off in range(i_f):
         i = i_off + i_i
         cx = (leftedges[0] + i*dx)
+        if i_f > 2: print k, cz
         for j_off in range(j_f):
             j = j_off + j_i
             cy = (leftedges[1] + j*dx)
@@ -167,15 +159,16 @@
                     child_grid = grids[ci-1]
                     child_dx = child_grid.dx[0]
                     child_leftedges = child_grid.left_edges
-                    child_i = lrint((cx-child_leftedges[0])/child_dx)
-                    child_j = lrint((cy-child_leftedges[1])/child_dx)
-                    child_k = lrint((cz-child_leftedges[2])/child_dx)
+                    child_i = int((cx-child_leftedges[0])/child_dx)
+                    child_j = int((cy-child_leftedges[1])/child_dx)
+                    child_k = int((cz-child_leftedges[2])/child_dx)
                     # set current child id to id of next cell to examine
                     genealogy[cp, 0] = curpos[level+1] 
                     # set next parent id to id of current cell
                     genealogy[curpos[level+1]:curpos[level+1]+8, 1] = cp
-                    RecurseOctreeByLevels(child_i, child_j, child_k, 2, 2, 2,
+                    s = RecurseOctreeByLevels(child_i, child_j, child_k, 2, 2, 2,
                                               curpos, ci, output, genealogy,
                                               corners, grids)
                 curpos[level] += 1
-    return
+    return s
+
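
The comment added to RecurseOctreeDepthFirst explains that each child index is found by mapping the parent cell centre into the child grid's index space. A tiny sketch of that arithmetic with invented numbers:

    child_leftedges = [0.0, 0.0, 0.0]
    child_dx = 0.25
    cx, cy, cz = 0.625, 0.375, 0.875
    child_i = int((cx - child_leftedges[0]) / child_dx)  # -> 2
    child_j = int((cy - child_leftedges[1]) / child_dx)  # -> 1
    child_k = int((cz - child_leftedges[2]) / child_dx)  # -> 3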


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/utilities/_amr_utils/fortran_reader.pyx
--- a/yt/utilities/_amr_utils/fortran_reader.pyx
+++ b/yt/utilities/_amr_utils/fortran_reader.pyx
@@ -142,8 +142,6 @@
     # points to the start of the record *following* the reading of iOctFree and
     # nOct.  For those following along at home, we only need to read:
     #   iOctPr, iOctLv
-    print min_level, max_level 
-    
     cdef int nchild = 8
     cdef int i, Lev, cell_ind, iOct, nLevel, nLevCells, ic1
     cdef np.int64_t next_record
@@ -170,7 +168,7 @@
         fread(&readin, sizeof(int), 1, f); FIX_LONG(readin)
         iOct = iHOLL[Level] - 1
         nLevel = iNOLL[Level]
-        print "Reading Hierarchy for Level", Lev, Level, nLevel, iOct
+        #print "Reading Hierarchy for Level", Lev, Level, nLevel, iOct
         #print ftell(f)
         for ic1 in range(nLevel):
             iOctMax = max(iOctMax, iOct)
@@ -218,7 +216,7 @@
         
         #find the length of all of the children section
         child_record = ftell(f) +  (next_record+2*sizeof(int))*nLevel*nchild
-        print 'Skipping over hydro vars', ftell(f), child_record
+        #print 'Skipping over hydro vars', ftell(f), child_record
         fseek(f, child_record, SEEK_SET)
         
         # for ic1 in range(nLevel * nchild):
@@ -288,9 +286,9 @@
 def read_art_grid(int varindex, 
               np.ndarray[np.int64_t, ndim=1] start_index,
               np.ndarray[np.int32_t, ndim=1] grid_dims,
-              np.ndarray[np.float64_t, ndim=3] data,
-              np.ndarray[np.int32_t, ndim=3] filled,
-              np.ndarray[np.float64_t, ndim=2] level_data,
+              np.ndarray[np.float32_t, ndim=3] data,
+              np.ndarray[np.uint8_t, ndim=3] filled,
+              np.ndarray[np.float32_t, ndim=2] level_data,
               int level, int ref_factor,
               component_grid_info):
     cdef int gi, i, j, k, domain, offset, grid_id
@@ -312,7 +310,7 @@
         domain = ogrid_info[0]
         #print "Loading", domain, ogrid_info
         grid_id = ogrid_info[1]
-        og_start_index = ogrid_info[3:]
+        og_start_index = ogrid_info[3:6] #the oct left edge
         for i in range(2*ref_factor):
             di = i + og_start_index[0] * ref_factor
             if di < start_index[0] or di >= end_index[0]: continue
@@ -350,6 +348,30 @@
     return to_fill
 
 @cython.cdivision(True)
+@cython.boundscheck(True)
+@cython.wraparound(False)
+def fill_child_mask(np.ndarray[np.int64_t, ndim=2] file_locations,
+                    np.ndarray[np.int64_t, ndim=1] grid_le,
+                    np.ndarray[np.uint8_t, ndim=4] art_child_masks,
+                    np.ndarray[np.uint8_t, ndim=3] child_mask):
+
+    #loop over file_locations, for each row extracting the index & LE
+    #of the oct we will pull from art_child_masks
+    #then use the art_child_masks info to fill in child_mask
+    cdef int i,ioct,x,y,z
+    cdef int nocts = file_locations.shape[0]
+    cdef int lex,ley,lez
+    for i in range(nocts):
+        ioct = file_locations[i,1] #from fortran to python indexing?
+        lex = file_locations[i,3] - grid_le[0] #the oct left edge x
+        ley = file_locations[i,4] - grid_le[1]
+        lez = file_locations[i,5] - grid_le[2]
+        for x in range(2):
+            for y in range(2):
+                for z in range(2):
+                    child_mask[lex+x,ley+y,lez+z] = art_child_masks[ioct,x,y,z]
+
+@cython.cdivision(True)
 @cython.boundscheck(False)
 @cython.wraparound(False)
 def read_castro_particles(char *fn, int offset, int fieldindex, int nfields,
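
A plain NumPy sketch of what fill_child_mask does for a single oct, mirroring the loop above (oct index in column 1, left edge in columns 3:6); the values are invented.

    import numpy as np

    grid_le         = np.array([10, 10, 10], dtype='int64')
    file_locations  = np.array([[0, 3, 0, 12, 10, 14]], dtype='int64')  # one oct
    art_child_masks = np.ones((5, 2, 2, 2), dtype='uint8')
    child_mask      = np.zeros((8, 8, 8), dtype='uint8')

    ioct = file_locations[0, 1]
    lex, ley, lez = file_locations[0, 3:6] - grid_le
    child_mask[lex:lex+2, ley:ley+2, lez:lez+2] = art_child_masks[ioct]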


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/utilities/_amr_utils/misc_utilities.pyx
--- a/yt/utilities/_amr_utils/misc_utilities.pyx
+++ b/yt/utilities/_amr_utils/misc_utilities.pyx
@@ -287,6 +287,7 @@
         uniquedims[i] = <np.float64_t *> \
                 alloca(2*n_grids * sizeof(np.float64_t))
     my_max = 0
+    best_dim = -1
     for dim in range(3):
         n_unique = 0
         uniques = uniquedims[dim]


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -1023,7 +1023,7 @@
                     # This node belongs to someone else, move along
                     current_node, previous_node = self.step_depth(current_node, previous_node)
                     continue
-                
+
             # If we are down to one grid, we are either in it or the parent grid
             if len(current_node.grids) == 1:
                 thisgrid = current_node.grids[0]
@@ -1042,11 +1042,12 @@
                         if len(children) > 0:
                             current_node.grids = self.pf.hierarchy.grids[na.array(children,copy=False)]
                             current_node.parent_grid = thisgrid
-                            # print 'My single grid covers the rest of the volume, and I have children, about to iterate on them'
+                            #print 'My single grid covers the rest of the volume, and I have children, about to iterate on them'
                             del children
                             continue
 
                     # Else make a leaf node (brick container)
+                    #print 'My single grid covers the rest of the volume, and I have no children', thisgrid
                     set_leaf(current_node, thisgrid, current_node.l_corner, current_node.r_corner)
                     volume_partitioned += na.prod(current_node.r_corner-current_node.l_corner)
                     # print 'My single grid covers the rest of the volume, and I have no children'
@@ -1055,13 +1056,15 @@
 
             # If we don't have any grids, this volume belongs to the parent        
             if len(current_node.grids) == 0:
+                #print 'This volume does not have a child grid, so it belongs to my parent!'
                 set_leaf(current_node, current_node.parent_grid, current_node.l_corner, current_node.r_corner)
-                # print 'This volume does not have a child grid, so it belongs to my parent!'
                 current_node, previous_node = self.step_depth(current_node, previous_node)
                 continue
 
             # If we've made it this far, time to build a dividing node
-            self._build_dividing_node(current_node)
+            # print 'Building dividing node'
+            # Continue if building failed
+            if self._build_dividing_node(current_node): continue
 
             # Step to the next node in a depth-first traversal.
             current_node, previous_node = self.step_depth(current_node, previous_node)
@@ -1073,10 +1076,10 @@
         '''
         Given a node, finds all the choices for the next dividing plane.  
         '''
-        data = na.array([(child.LeftEdge, child.RightEdge) for child in current_node.grids],copy=False)
         # For some reason doing dim 0 separately is slightly faster.
         # This could be rewritten to all be in the loop below.
 
+        data = na.array([(child.LeftEdge, child.RightEdge) for child in current_node.grids],copy=False)
         best_dim, split, less_ids, greater_ids = \
             kdtree_get_choices(data, current_node.l_corner, current_node.r_corner)
         return data[:,:,best_dim], best_dim, split, less_ids, greater_ids
@@ -1086,8 +1089,19 @@
         Makes the current node a dividing node, and initializes the
         left and right children.
         '''
-        
-        data,best_dim,split,less_ids,greater_ids = self._get_choices(current_node)
+
+        data = na.array([(child.LeftEdge, child.RightEdge) for child in current_node.grids],copy=False)
+        best_dim, split, less_ids, greater_ids = \
+            kdtree_get_choices(data, current_node.l_corner, current_node.r_corner)
+
+        del data
+
+        # Here we break out if no unique grids were found. In this case, there
+        # are likely overlapping grids, and we assume that the first grid takes
+        # precedence.  This is fragile.
+        if best_dim == -1:
+            current_node.grids = [current_node.grids[0]]
+            return 1
 
         current_node.split_ax = best_dim
         current_node.split_pos = split
@@ -1095,7 +1109,7 @@
         #greater_ids0 = (split < data[:,1])
         #assert(na.all(less_ids0 == less_ids))
         #assert(na.all(greater_ids0 == greater_ids))
-        
+
         current_node.left_child = MasterNode(my_id=_lchild_id(current_node.id),
                                              parent=current_node,
                                              parent_grid=current_node.parent_grid,
@@ -1114,7 +1128,9 @@
         # build to work.  The other deletions are just to save memory.
         del current_node.grids, current_node.parent_grid, current_node.brick,\
             current_node.li, current_node.ri, current_node.dims
-        
+
+        return 0
+
     def traverse(self, back_center, front_center, image):
         r"""Traverses the kd-Tree, casting the partitioned grids from back to
             front.


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -82,11 +82,15 @@
         if cls.npfs > 1:
             self(args)
         else:
-            if len(getattr(args, "pf", [])) > 1:
+            pf_args = getattr(args, "pf", [])
+            if len(pf_args) > 1:
                 pfs = args.pf
                 for pf in pfs:
                     args.pf = pf
                     self(args)
+            elif len(pf_args) == 0:
+                pfs = []
+                self(args)
             else:
                 args.pf = getattr(args, 'pf', [None])[0]
                 self(args)
@@ -105,6 +109,8 @@
 _common_options = dict(
     pf      = dict(short="pf", action=GetParameterFiles,
                    nargs="+", help="Parameter files to run on"),
+    opf     = dict(action=GetParameterFiles, dest="pf",
+                   nargs="*", help="(Optional) Parameter files to run on"),
     axis    = dict(short="-a", long="--axis",
                    action="store", type=int,
                    dest="axis", default=4,
@@ -1269,7 +1275,8 @@
                  help="At startup, find all *.hierarchy files in the CWD"),
             dict(short="-d", long="--debug", action="store_true",
                  default = False, dest="debug",
-                 help="Add a debugging mode for cell execution")
+                 help="Add a debugging mode for cell execution"),
+            "opf"
             )
     description = \
         """
@@ -1315,12 +1322,12 @@
         from yt.gui.reason.bottle_mods import uuid_serve_functions, PayloadHandler
         hr = ExtDirectREPL(base_extjs_path)
         hr.debug = PayloadHandler.debug = args.debug
+        command_line = ["pfs = []"]
         if args.find:
             # We just have to find them and store references to them.
-            command_line = ["pfs = []"]
             for fn in sorted(glob.glob("*/*.hierarchy")):
                 command_line.append("pfs.append(load('%s'))" % fn[:-10])
-            hr.execute("\n".join(command_line))
+        hr.execute("\n".join(command_line))
         bottle.debug()
         uuid_serve_functions(open_browser=args.open_browser,
                     port=int(args.port), repl=hr)
@@ -1430,7 +1437,7 @@
         if 'upload' in rv and 'links' in rv['upload']:
             print
             print "Image successfully uploaded!  You can find it at:"
-            print "    %s" % (rv['upload']['links']['imgur_page'])
+            print "    %s" % (rv['upload']['links']['original'])
             print
             print "If you'd like to delete it, visit this page:"
             print "    %s" % (rv['upload']['links']['delete_page'])


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -33,6 +33,15 @@
 
 # Data access exceptions:
 
+class YTOutputNotIdentified(YTException):
+    def __init__(self, args, kwargs):
+        self.args = args
+        self.kwargs = kwargs
+
+    def __str__(self):
+        return "Supplied %s %s, but could not load!" % (
+            self.args, self.kwargs)
+
 class YTSphereTooSmall(YTException):
     def __init__(self, pf, radius, smallest_cell):
         YTException.__init__(self, pf)
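
A hedged example of the new exception; the filename is invented, and the assumption is that the load() convenience function raises it when no frontend recognizes the supplied arguments.

    from yt.utilities.exceptions import YTOutputNotIdentified

    try:
        raise YTOutputNotIdentified(("not_a_dataset",), {})
    except YTOutputNotIdentified as err:
        print(str(err))  # Supplied ('not_a_dataset',) {}, but could not load!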


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/utilities/grid_data_format/__init__.py
--- /dev/null
+++ b/yt/utilities/grid_data_format/__init__.py
@@ -0,0 +1,2 @@
+from conversion import *
+


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/utilities/grid_data_format/conversion/__init__.py
--- /dev/null
+++ b/yt/utilities/grid_data_format/conversion/__init__.py
@@ -0,0 +1,3 @@
+from conversion_abc import Converter
+from conversion_athena import AthenaDistributedConverter, AthenaConverter
+


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/utilities/grid_data_format/conversion/conversion_abc.py
--- /dev/null
+++ b/yt/utilities/grid_data_format/conversion/conversion_abc.py
@@ -0,0 +1,7 @@
+
+class Converter(object):
+    def __init__(self, basename, outname=None):
+        self.basename = basename
+        self.outname = outname
+    def convert(self):
+        pass
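
A minimal subclass sketch of the Converter base class added above; the class name and its behaviour are purely illustrative.

    from yt.utilities.grid_data_format.conversion import Converter

    class NullConverter(Converter):
        def convert(self):
            # a real converter reads self.basename and writes self.outname
            print("converting %s -> %s" % (self.basename, self.outname))

    NullConverter("data.0001", outname="data.0001.gdf").convert()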


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/utilities/grid_data_format/conversion/conversion_athena.py
--- /dev/null
+++ b/yt/utilities/grid_data_format/conversion/conversion_athena.py
@@ -0,0 +1,503 @@
+import os
+import weakref
+import numpy as na
+import h5py as h5
+from conversion_abc import *
+from glob import glob
+from collections import \
+    defaultdict
+from string import \
+    strip, \
+    rstrip
+from stat import \
+    ST_CTIME
+
+translation_dict = {}
+translation_dict['density'] = 'density'
+translation_dict['total_energy'] = 'specific_energy'
+translation_dict['velocity_x'] = 'velocity_x'
+translation_dict['velocity_y'] = 'velocity_y'
+translation_dict['velocity_z'] = 'velocity_z'
+translation_dict['cell_centered_B_x'] = 'mag_field_x'
+translation_dict['cell_centered_B_y'] = 'mag_field_y'
+translation_dict['cell_centered_B_z'] = 'mag_field_z'
+
+class AthenaDistributedConverter(Converter):
+    def __init__(self, basename, outname=None, source_dir=None, field_conversions=None):
+        self.fields = []
+        self.current_time=0.0
+        name = basename.split('.')
+        self.ddn = int(name[1])
+        if source_dir is None:
+            source_dir = './'
+        self.source_dir = source_dir+'/'
+        self.basename = name[0]
+        if outname is None:
+            outname = self.basename+'.%04i'%self.ddn+'.gdf'
+        self.outname = outname
+        if field_conversions is None:
+            field_conversions = {}
+        self.field_conversions = field_conversions
+        self.handle = None
+
+    def parse_line(self,line, grid):
+    #    print line
+        # grid is a dictionary
+        splitup = line.strip().split()
+        if "vtk" in splitup:
+            grid['vtk_version'] = splitup[-1]
+        elif "Really" in splitup:
+            grid['time'] = splitup[-1]
+            self.current_time = grid['time']
+        elif 'PRIMITIVE' in splitup:
+            grid['time'] = float(splitup[4].rstrip(','))
+            grid['level'] = int(splitup[6].rstrip(','))
+            grid['domain'] = int(splitup[8].rstrip(','))
+            self.current_time = grid['time']
+        elif "DIMENSIONS" in splitup:
+            grid['dimensions'] = na.array(splitup[-3:]).astype('int')
+        elif "ORIGIN" in splitup:
+            grid['left_edge'] = na.array(splitup[-3:]).astype('float64')
+        elif "SPACING" in splitup:
+            grid['dds'] = na.array(splitup[-3:]).astype('float64')
+        elif "CELL_DATA" in splitup:
+            grid["ncells"] = int(splitup[-1])
+        elif "SCALARS" in splitup:
+            field = splitup[1]
+            grid['read_field'] = field
+            grid['read_type'] = 'scalar'
+        elif "VECTORS" in splitup:
+            field = splitup[1]
+            grid['read_field'] = field
+            grid['read_type'] = 'vector'
+
+    def write_gdf_field(self, fn, grid_number, field, data):
+        f = self.handle
+        ## --------- Store Grid Data --------- ##
+        if 'grid_%010i'%grid_number not in f['data'].keys():
+            g = f['data'].create_group('grid_%010i'%grid_number)
+        else:
+            g = f['data']['grid_%010i'%grid_number]
+        name = field
+        try:
+            name = translation_dict[name]
+        except:
+            pass
+        # print 'Writing %s' % name
+        if not name in g.keys(): 
+            g.create_dataset(name,data=data)
+        
+
+
+    def read_and_write_hierarchy(self,basename, ddn, gdf_name):
+        """ Read Athena legacy vtk file from multiple cpus """
+        proc_names = glob(self.source_dir+'id*')
+        #print 'Reading a dataset from %i Processor Files' % len(proc_names)
+        N = len(proc_names)
+        grid_dims = na.empty([N,3],dtype='int64')
+        grid_left_edges = na.empty([N,3],dtype='float64')
+        grid_dds = na.empty([N,3],dtype='float64')
+        grid_levels = na.zeros(N,dtype='int64')
+        grid_parent_ids = -1*na.ones(N,dtype='int64')
+        grid_particle_counts = na.zeros([N,1],dtype='int64')
+
+        for i in range(N):
+            if i == 0:
+                fn = self.source_dir+'id%i/'%i + basename + '.%04i'%ddn + '.vtk'
+            else:
+                fn = self.source_dir+'id%i/'%i + basename + '-id%i'%i + '.%04i'%ddn + '.vtk'
+
+            print 'Reading file %s' % fn
+            f = open(fn,'rb')
+            grid = {}
+            grid['read_field'] = None
+            grid['read_type'] = None
+            table_read=False
+            line = f.readline()
+            while grid['read_field'] is None:
+                self.parse_line(line, grid)
+                if "SCALAR" in line.strip().split():
+                    break
+                if "VECTOR" in line.strip().split():
+                    break
+                if 'TABLE' in line.strip().split():
+                    break
+                if len(line) == 0: break
+                del line
+                line = f.readline()
+
+            if len(line) == 0: break
+            
+            if na.prod(grid['dimensions']) != grid['ncells']:
+                grid['dimensions'] -= 1
+                grid['dimensions'][grid['dimensions']==0]=1
+            if na.prod(grid['dimensions']) != grid['ncells']:
+                print 'product of dimensions %i not equal to number of cells %i' % \
+                      (na.prod(grid['dimensions']), grid['ncells'])
+                raise TypeError
+
+            # Append all hierarchy info before reading this grid's data
+            grid_dims[i]=grid['dimensions']
+            grid_left_edges[i]=grid['left_edge']
+            grid_dds[i]=grid['dds']
+            #grid_ncells[i]=grid['ncells']
+            del grid
+
+            f.close()
+            del f
+        f = self.handle 
+
+        ## --------- Begin level nodes --------- ##
+        g = f.create_group('gridded_data_format')
+        g.attrs['format_version']=na.float32(1.0)
+        g.attrs['data_software']='athena'
+        data_g = f.create_group('data')
+        field_g = f.create_group('field_types')
+        part_g = f.create_group('particle_types')
+        pars_g = f.create_group('simulation_parameters')
+
+
+        gles = grid_left_edges
+        gdims = grid_dims
+        dle = na.min(gles,axis=0)
+        dre = na.max(gles+grid_dims*grid_dds,axis=0)
+        glis = ((gles - dle)/grid_dds).astype('int64')
+        gris = glis + gdims
+
+        ddims = (dre-dle)/grid_dds[0]
+
+        # grid_left_index
+        gli = f.create_dataset('grid_left_index',data=glis)
+        # grid_dimensions
+        gdim = f.create_dataset('grid_dimensions',data=gdims)
+
+        # grid_level
+        level = f.create_dataset('grid_level',data=grid_levels)
+
+        ## ----------QUESTIONABLE NEXT LINE--------- ##
+        # This data needs two dimensions for now. 
+        part_count = f.create_dataset('grid_particle_count',data=grid_particle_counts)
+
+        # grid_parent_id
+        pids = f.create_dataset('grid_parent_id',data=grid_parent_ids)
+
+        ## --------- Done with top level nodes --------- ##
+
+        pars_g.attrs['refine_by'] = na.int64(1)
+        pars_g.attrs['dimensionality'] = na.int64(3)
+        pars_g.attrs['domain_dimensions'] = ddims
+        pars_g.attrs['current_time'] = self.current_time
+        pars_g.attrs['domain_left_edge'] = dle
+        pars_g.attrs['domain_right_edge'] = dre
+        pars_g.attrs['unique_identifier'] = 'athenatest'
+        pars_g.attrs['cosmological_simulation'] = na.int64(0)
+        pars_g.attrs['num_ghost_zones'] = na.int64(0)
+        pars_g.attrs['field_ordering'] = na.int64(1)
+        pars_g.attrs['boundary_conditions'] = na.int64([0]*6) # For Now
+
+        # Extra pars:
+        # pars_g.attrs['n_cells'] = grid['ncells']
+        pars_g.attrs['vtk_version'] = 1.0
+
+        # Add particle types
+        # Nothing to do here
+
+        # Add particle field attributes
+        #f.close()
+
+
+    def read_and_write_data(self, basename, ddn, gdf_name):
+        proc_names = glob(self.source_dir+'id*')
+        #print 'Reading a dataset from %i Processor Files' % len(proc_names)
+        N = len(proc_names)
+        for i in range(N):
+            if i == 0:
+                fn = self.source_dir+'id%i/'%i + basename + '.%04i'%ddn + '.vtk'
+            else:
+                fn = self.source_dir+'id%i/'%i + basename + '-id%i'%i + '.%04i'%ddn + '.vtk'
+            f = open(fn,'rb')
+            #print 'Reading data from %s' % fn
+            line = f.readline()
+            while line is not '':
+                # print line
+                if len(line) == 0: break
+                splitup = line.strip().split()
+
+                if "DIMENSIONS" in splitup:
+                    grid_dims = na.array(splitup[-3:]).astype('int')
+                    line = f.readline()
+                    continue
+                elif "CELL_DATA" in splitup:
+                    grid_ncells = int(splitup[-1])
+                    line = f.readline()
+                    if na.prod(grid_dims) != grid_ncells:
+                        grid_dims -= 1
+                        grid_dims[grid_dims==0]=1
+                    if na.prod(grid_dims) != grid_ncells:
+                        print 'product of dimensions %i not equal to number of cells %i' % \
+                              (na.prod(grid_dims), grid_ncells)
+                        raise TypeError
+                    break
+                else:
+                    del line
+                    line = f.readline()
+            read_table = False
+            while line is not '':
+                if len(line) == 0: break
+                splitup = line.strip().split()
+                if 'SCALARS' in splitup:
+                    field = splitup[1]
+                    if not read_table:
+                        line = f.readline() # Read the lookup table line
+                        read_table = True
+                    data = na.fromfile(f, dtype='>f4', count=grid_ncells).reshape(grid_dims,order='F')
+                    if i == 0:
+                        self.fields.append(field)
+                    # print 'writing field %s' % field
+                    self.write_gdf_field(gdf_name, i, field, data)
+                    read_table=False
+
+                elif 'VECTORS' in splitup:
+                    field = splitup[1]
+                    data = na.fromfile(f, dtype='>f4', count=3*grid_ncells)
+                    data_x = data[0::3].reshape(grid_dims,order='F')
+                    data_y = data[1::3].reshape(grid_dims,order='F')
+                    data_z = data[2::3].reshape(grid_dims,order='F')
+                    if i == 0:
+                        self.fields.append(field+'_x')
+                        self.fields.append(field+'_y')
+                        self.fields.append(field+'_z')
+
+                    # print 'writing field %s' % field
+                    self.write_gdf_field(gdf_name, i, field+'_x', data_x)
+                    self.write_gdf_field(gdf_name, i, field+'_y', data_y)
+                    self.write_gdf_field(gdf_name, i, field+'_z', data_z)
+                    del data, data_x, data_y, data_z
+                del line
+                line = f.readline()
+            f.close()
+            del f
+
+        f = self.handle 
+        field_g = f['field_types']
+        # Add Field Attributes
+        for name in self.fields:
+            tname = name
+            try:
+                tname = translation_dict[name]
+            except:
+                pass
+            this_field = field_g.create_group(tname)
+            if name in self.field_conversions.keys():
+                this_field.attrs['field_to_cgs'] = self.field_conversions[name]
+            else:
+                this_field.attrs['field_to_cgs'] = na.float64('1.0') # For Now
+            
+
+    def convert(self, hierarchy=True, data=True):
+        self.handle = h5.File(self.outname, 'a')
+        if hierarchy:
+            self.read_and_write_hierarchy(self.basename, self.ddn ,self.outname)
+        if data:
+            self.read_and_write_data(self.basename, self.ddn ,self.outname)
+        self.handle.close()
+
+class AthenaConverter(Converter):
+    def __init__(self, basename, outname=None, field_conversions=None):
+        self.fields = []
+        self.basename = basename
+        name = basename.split('.')
+        fn = '%s.%04i'%(name[0],int(name[1]))
+        self.ddn = int(name[1])
+        self.basename = fn
+        if outname is None:
+            outname = fn+'.gdf'
+        self.outname = outname
+        if field_conversions is None:
+            field_conversions = {}
+        self.field_conversions = field_conversions
+
+
+    def parse_line(self, line, grid):
+    #    print line
+        # grid is a dictionary
+        splitup = line.strip().split()
+        if "vtk" in splitup:
+            grid['vtk_version'] = splitup[-1]
+        elif "Really" in splitup:
+            grid['time'] = splitup[-1]
+        elif "DIMENSIONS" in splitup:
+            grid['dimensions'] = na.array(splitup[-3:]).astype('int')
+        elif "ORIGIN" in splitup:
+            grid['left_edge'] = na.array(splitup[-3:]).astype('float64')
+        elif "SPACING" in splitup:
+            grid['dds'] = na.array(splitup[-3:]).astype('float64')
+        elif "CELL_DATA" in splitup:
+            grid["ncells"] = int(splitup[-1])
+        elif "SCALARS" in splitup:
+            field = splitup[1]
+            grid['read_field'] = field
+            grid['read_type'] = 'scalar'
+        elif "VECTORS" in splitup:
+            field = splitup[1]
+            grid['read_field'] = field
+            grid['read_type'] = 'vector'
+        
+    def read_grid(self, filename):
+        """ Read Athena legacy vtk file from single cpu """
+        f = open(filename,'rb')
+        #print 'Reading from %s'%filename
+        grid = {}
+        grid['read_field'] = None
+        grid['read_type'] = None
+        table_read=False
+        line = f.readline()
+        while line is not '':
+            while grid['read_field'] is None:
+                self.parse_line(line, grid)
+                if grid['read_type'] is 'vector':
+                    break
+                if table_read is False:             
+                    line = f.readline()
+                if 'TABLE' in line.strip().split():
+                    table_read = True
+                if len(line) == 0: break
+            #    print line
+
+            if len(line) == 0: break
+            if na.prod(grid['dimensions']) != grid['ncells']:
+                grid['dimensions'] -= 1
+            if na.prod(grid['dimensions']) != grid['ncells']:
+                print 'product of dimensions %i not equal to number of cells %i' % \
+                      (na.prod(grid['dimensions']), grid['ncells'])
+                raise TypeError
+
+            if grid['read_type'] == 'scalar':
+                grid[grid['read_field']] = \
+                    na.fromfile(f, dtype='>f4', count=grid['ncells']).reshape(grid['dimensions'],order='F')
+                self.fields.append(grid['read_field'])
+            elif grid['read_type'] == 'vector':
+                data = na.fromfile(f, dtype='>f4', count=3*grid['ncells'])
+                grid[grid['read_field']+'_x'] = data[0::3].reshape(grid['dimensions'],order='F')
+                grid[grid['read_field']+'_y'] = data[1::3].reshape(grid['dimensions'],order='F')
+                grid[grid['read_field']+'_z'] = data[2::3].reshape(grid['dimensions'],order='F')
+                self.fields.append(grid['read_field']+'_x')
+                self.fields.append(grid['read_field']+'_y')
+                self.fields.append(grid['read_field']+'_z')
+            else:
+                raise TypeError
+            grid['read_field'] = None
+            grid['read_type'] = None
+            line = f.readline()
+            if len(line) == 0: break
+        grid['right_edge'] = grid['left_edge']+grid['dds']*(grid['dimensions'])
+        return grid
+
+    def write_to_gdf(self, fn, grid):
+        f = h5.File(fn,'a')
+
+        ## --------- Begin level nodes --------- ##
+        g = f.create_group('gridded_data_format')
+        g.attrs['format_version']=na.float32(1.0)
+        g.attrs['data_software']='athena'
+        data_g = f.create_group('data')
+        field_g = f.create_group('field_types')
+        part_g = f.create_group('particle_types')
+        pars_g = f.create_group('simulation_parameters')
+
+        dle = grid['left_edge'] # True only in this case of one grid for the domain
+        gles = na.array([grid['left_edge']])
+        gdims = na.array([grid['dimensions']])
+        glis = ((gles - dle)/grid['dds']).astype('int64')
+        gris = glis + gdims
+
+        # grid_left_index
+        gli = f.create_dataset('grid_left_index',data=glis)
+        # grid_dimensions
+        gdim = f.create_dataset('grid_dimensions',data=gdims)
+
+        levels = na.array([0]).astype('int64') # unigrid example
+        # grid_level
+        level = f.create_dataset('grid_level',data=levels)
+
+        ## ----------QUESTIONABLE NEXT LINE--------- ##
+        # This data needs two dimensions for now. 
+        n_particles = na.array([[0]]).astype('int64')
+        #grid_particle_count
+        part_count = f.create_dataset('grid_particle_count',data=n_particles)
+
+        # Assume -1 means no parent.
+        parent_ids = na.array([-1]).astype('int64')
+        # grid_parent_id
+        pids = f.create_dataset('grid_parent_id',data=parent_ids)
+
+        ## --------- Done with top level nodes --------- ##
+
+        f.create_group('hierarchy')
+
+        ## --------- Store Grid Data --------- ##
+
+        g0 = data_g.create_group('grid_%010i'%0)
+        for field in self.fields:
+            name = field
+            if field in translation_dict.keys():
+                name = translation_dict[name]
+            if not name in g0.keys(): 
+                g0.create_dataset(name,data=grid[field])
+
+        ## --------- Store Particle Data --------- ##
+
+        # Nothing to do
+
+        ## --------- Attribute Tables --------- ##
+
+        pars_g.attrs['refine_by'] = na.int64(1)
+        pars_g.attrs['dimensionality'] = na.int64(3)
+        pars_g.attrs['domain_dimensions'] = grid['dimensions']
+        try:
+            pars_g.attrs['current_time'] = grid['time']
+        except KeyError:
+            pars_g.attrs['current_time'] = 0.0
+        pars_g.attrs['domain_left_edge'] = grid['left_edge'] # For Now
+        pars_g.attrs['domain_right_edge'] = grid['right_edge'] # For Now
+        pars_g.attrs['unique_identifier'] = 'athenatest'
+        pars_g.attrs['cosmological_simulation'] = na.int64(0)
+        pars_g.attrs['num_ghost_zones'] = na.int64(0)
+        pars_g.attrs['field_ordering'] = na.int64(0)
+        pars_g.attrs['boundary_conditions'] = na.int64([0]*6) # For Now
+
+        # Extra pars:
+        pars_g.attrs['n_cells'] = grid['ncells']
+        pars_g.attrs['vtk_version'] = grid['vtk_version']
+
+        # Add Field Attributes
+        for name in g0.keys():
+            tname = name
+            try:
+                tname = translation_dict[name]
+            except KeyError:
+                pass
+            this_field = field_g.create_group(tname)
+            if name in self.field_conversions.keys():
+                this_field.attrs['field_to_cgs'] = self.field_conversions[name]
+            else:
+                this_field.attrs['field_to_cgs'] = na.float64('1.0') # For Now
+
+        # Add particle types
+        # Nothing to do here
+
+        # Add particle field attributes
+        f.close()
+
+    def convert(self):
+        grid = self.read_grid(self.basename+'.vtk')
+        self.write_to_gdf(self.outname,grid)
+        
+# import sys
+# if __name__ == '__main__':
+#     n = sys.argv[-1]
+#     n = n.split('.')
+#     fn = '%s.%04i'%(n[0],int(n[1]))
+#     grid = read_grid(fn+'.vtk')
+#     write_to_hdf5(fn+'.gdf',grid)
+    
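
For reference, the geometry bookkeeping in parse_line() and read_grid() above reduces to the following sketch; the header lines and values are invented for illustration, and numpy is aliased to ``na`` as in the module::

    import numpy as na

    # Invented header lines in the layout parse_line() expects.
    header = ["# vtk DataFile Version 3.0",
              "Really cool Athena data at time= 1.25",
              "DIMENSIONS 129 129 129",
              "ORIGIN -0.5 -0.5 -0.5",
              "SPACING 0.0078125 0.0078125 0.0078125",
              "CELL_DATA 2097152"]
    grid = {}
    for line in header:
        splitup = line.strip().split()
        if "DIMENSIONS" in splitup:
            grid['dimensions'] = na.array(splitup[-3:]).astype('int')
        elif "ORIGIN" in splitup:
            grid['left_edge'] = na.array(splitup[-3:]).astype('float64')
        elif "SPACING" in splitup:
            grid['dds'] = na.array(splitup[-3:]).astype('float64')
        elif "CELL_DATA" in splitup:
            grid['ncells'] = int(splitup[-1])

    # VTK DIMENSIONS count grid points; the converter decrements to cell counts.
    if na.prod(grid['dimensions']) != grid['ncells']:
        grid['dimensions'] -= 1
    grid['right_edge'] = grid['left_edge'] + grid['dds']*grid['dimensions']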


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/utilities/grid_data_format/conversion/setup.py
--- /dev/null
+++ b/yt/utilities/grid_data_format/conversion/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('conversion', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/utilities/grid_data_format/docs/IRATE_notes.txt
--- /dev/null
+++ b/yt/utilities/grid_data_format/docs/IRATE_notes.txt
@@ -0,0 +1,39 @@
+Here is info from Erik Tollerud about the IRATE data format.
+
+The bitbucket project is at https://bitbucket.org/eteq/irate-format
+and I've posted a copy of the docs at
+http://www.physics.uci.edu/~etolleru/irate-docs/ , in particular
+http://www.physics.uci.edu/~etolleru/irate-docs/formatspec.html ,
+which details the actual requirements for data to fit in the format.
+As far as I can tell, the following steps are needed to make GDF fit
+inside the IRATE format:
+
+*move everything except "/simulation_parameters" into a group named "/GridData"
+
+*rename "/simulation_parameters" to "SimulationParameters"
+
+*remove the 'field_types' group (this is not absolutely necessary, but
+the convention we had in mind for IRATE is that the dataset names
+themselves (e.g. the datasets like /data/gridxxxxxx/density) serve as
+the field definitions).
+
+* The unit information that's in 'field_types' should then become
+attributes in either "/GridData" or "/GridData/data", following the
+naming scheme e.g. "densityunitcgs" with the unit form given in
+the IRATE doc; an additional attribute e.g. "densityunitname"
+should be added with the human-readable name of the unit. This unit
+information can also live at the dataset level, but it probably makes
+more sense to put it instead at the higher level (IRATE supports both
+ways of doing it).
+
+* The Cosmology group (as defined in the IRATE specification) must be
+added - for simulations that are not technically "cosmological", you
+can just use one of the default cosmologies (WMAP7 is a reasonable
+choice - there's a function in the IRATE tools that automatically
+takes care of all the details for this).
+
+* optional: redo all the group names to follow the CamelCase
+convention - that's what we've been using elsewhere in IRATE.  This is
+an arbitrary choice, but it would be nice for it to be consistent
+throughout the format.
+
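
For concreteness, the regrouping described in the notes above could be sketched with h5py roughly as follows; the file name is an assumption and this is not part of the notes themselves::

    import h5py

    f = h5py.File("example.gdf", "a")   # an existing GDF file; name assumed
    f.create_group("GridData")
    for key in [k for k in f.keys() if k not in ("GridData", "simulation_parameters")]:
        f.move(key, "GridData/" + key)
    f.move("simulation_parameters", "SimulationParameters")
    del f["GridData/field_types"]       # optional, per the notes above
    f.close()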


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/utilities/grid_data_format/docs/gdf_specification.txt
--- /dev/null
+++ b/yt/utilities/grid_data_format/docs/gdf_specification.txt
@@ -0,0 +1,282 @@
+Gridded Data Format
+===================
+
+This is a pre-release of version 1.0 of this format.  Lots of formats have come
+before, but this one is simple and will work with yt; the idea is to create an
+import and export function in yt that will read this, so that other codes (such
+as ZEUS-MP) can export directly to it or convert their data to it, and so that
+yt can export to it from any format it recognizes and reads.
+
+Caveats and Notes
+-----------------
+
+#. We avoid having many attributes on many nodes, as access can be quite slow
+#. Cartesian data only for now
+#. All grids must have the same number of ghost zones.
+#. If ``/grid_parent_id`` does not exist, parentage relationships will be
+   reconstructed and assumed to allow multiple grids
+#. No parentage can skip levels
+#. All grids are at the same time
+#. This format is designed for single-fluid calculations (with color fields)
+   but it should be viewed as extensible to multiple-fluids.
+#. All fluid quantities are assumed to be in every grid, filling every zone.  Inside
+   a given grid, for a given particle type, all the affiliated fields must be the
+   same length.  (i.e., the dark matter velocity fields must all have the same length.)
+#. Everything is in a single file; for extremely large datasets, the user may
+   utilize HDF5 external links to link to files other than the primary.  (This
+   enables, for instance, Enzo datasets to have only a thin wrapper that creates
+   this format.)
+#. All fluid fields in this version of the format are assumed to have the
+   dimensionality of the grid they reside in plus any ghost zones, plus any
+   additional dimensionality required by the staggering property.
+#. Particles may have dataspaces affiliated with them.  (See Enzo's
+   OutputParticleTypeGrouping for more information.)  This enables a light
+   wrapper around data formats with interspersed particle types.
+#. Boundary conditions are very simply specified -- future revisions
+   will feature more complicated and rich specifications for the boundary.
+
+Furthermore, we make a distinction between fluid quantities and particle
+quantities.  Particles remain affiliated with grid nodes.  Positions of
+particles are global, but this will change with future versions of this
+document.
+
+Format Declaration
+------------------
+
+The file type is HDF5.  We require version 1.8 or greater.  At the root level,
+this group must exist: ::
+
+   /gridded_data_format
+
+This must contain the (float) attribute ``format_version``.  This document
+describes version 1.0.  Optional attributes may exist:
+
+``data_software``
+   string, references the application creating the file, not the
+   author of the data
+``data_software_version``
+   string, should reference a unique version number
+``data_author``
+   string, references the person or persons who created the data,
+   should include an email address
+``data_comment``
+   string, anything about the data
+
+Top Level Nodes
+---------------
+
+At least five top-level groups must exist, although some may be empty. ::
+
+   /gridded_data_format
+   /data
+   /simulation_parameters
+   /field_types
+   /particle_types
+
+Additionally, the grid structure elements must exist.  The 0-indexed position in
+these arrays defines a unique "Grid ID".
+
+``/grid_left_index``
+   (int64, Nx3): global, relative to current level, and only the active region
+``/grid_dimensions``
+   (int64, Nx3): only the active regions
+``/grid_level``
+   (int64, N): level, indexed by zero
+``/grid_particle_count``
+   (int64, N): total number of particles.  (May change in subsequent versions.)
+``/grid_parent_id``
+   (int64, N): optional, may only reference a single parent
+
+Grid Fields
+-----------
+
+Underneath ``/data/`` there must be entries for every grid, of the format
+``/data/grid_%010i``.  These grids need no attributes, and underneath them
+datasets live.
+
+Fluid Fields
+++++++++++++
+
+For every grid we then define ``/data/grid_%010i/%(field)s``, where
+``%(field)s`` draws from all of the fields defined.  We define no
+standard for which fields must be present, only the names and units.  Units
+should always be "proper" cgs (or conversion factors should be supplied, below), and
+field names should be drawn from this list, with these names.  Not all fields
+must be represented.  Fields must extend beyond the active region if ghost zones
+are included.  All pre-defined fields are assumed to be cell-centered unless this
+is overridden in ``field_types``.
+
+  * ``density`` (g/cc)
+  * ``temperature`` (K)
+  * ``specific_thermal_energy`` (erg/g)
+  * ``specific_energy`` (erg/g, includes kinetic and magnetic)
+  * ``magnetic_energy`` (erg/g)
+  * ``velocity_x`` (cm/s)
+  * ``velocity_y`` (cm/s)
+  * ``velocity_z`` (cm/s)
+  * ``species_density_%s`` (g/cc) where %s is the species name including ionization
+    state, such as H2I, HI, HII, CO, "elec" for electron
+  * ``mag_field_x``
+  * ``mag_field_y``
+  * ``mag_field_z``
+
+Particle Fields
++++++++++++++++
+
+Particles are more expensive to sort and identify based on "type" -- for
+instance, dark matter versus star particles.  The particles should be separated
+based on type, under the group ``/data/grid_%010i/particles/``.
+
+The particles group will have sub-groups, each of which will be named after the
+type of particle it represents.  We only specify "dark_matter" as a type;
+anything else must be specified as described below.
+
+Each node, for instance ``/data/grid_%010i/particles/dark_matter/``, must
+contain the following fields:
+
+  * ``mass`` (g)
+  * ``id``
+  * ``position_x`` (in physical units)
+  * ``position_y`` (in physical units)
+  * ``position_z`` (in physical units)
+  * ``velocity_x`` (cm/s)
+  * ``velocity_y`` (cm/s)
+  * ``velocity_z`` (cm/s)
+  * ``dataspace`` (optional) an HDF5 dataspace to be used when opening
+    all affiliated fields.   If this is to be used, it must be appropriately set in
+    the particle type definition.  This is of type ``H5T_STD_REF_DSETREG``.
+    (See Enzo's OutputParticleTypeGrouping for an example.)
+
+Additional Fields
++++++++++++++++++
+
+Any additional fields from the data can be added, but must have a corresponding
+entry in the root field table (described below.)  The naming scheme is to be as
+explicit as possible, with units in cgs (or a conversion factor to the standard
+cgs unit, in the field table.)
+
+Attribute Table
+---------------
+
+In the root node, we define several groups which contain attributes.
+
+Simulation Parameters
++++++++++++++++++++++
+
+These attributes will all be associated with ``/simulation_parameters``.
+
+``refine_by``
+   relative global refinement
+``dimensionality``
+   1-, 2- or 3-D data
+``domain_dimensions``
+   dimensions in the top grid
+``current_time``
+   current time in simulation, in seconds, from “start” of simulation
+``domain_left_edge``
+   the left edge of the domain, in cm
+``domain_right_edge``
+   the right edge of the domain, in cm
+``unique_identifier``
+   regarded as a string, but can be anything
+``cosmological_simulation``
+   0 or 1
+``num_ghost_zones``
+   integer
+``field_ordering``
+   integer: 0 for C, 1 for Fortran
+``boundary_conditions``
+   integer (6): 0 for periodic, 1 for mirrored, 2 for outflow.  Needs one for each face
+   of the cube.  Any past the dimensionality should be set to -1.  The order of specification
+   goes left in 0th dimension, right in 0th dimension, left in 1st dimension, right in 1st dimension,
+   left in 2nd dimension, right in 2nd dimension.  Note also that yt does not currently support non-periodic
+   boundary conditions, and that the assumption of periodicity shows up primarily in plots and
+   covering grids.
+
+Optionally, attributes for cosmological simulations can be provided, if
+cosmological_simulation above is set to 1:
+
+  * current_redshift
+  * omega_matter (at z=0)
+  * omega_lambda (at z=0)
+  * hubble_constant (h100)
+
+Fluid Field Attributes
+++++++++++++++++++++++
+
+Every included field that is not both already in CGS and in the list above
+requires parameters.  If a field is in the above list but is not in CGS,
+only the field_to_cgs attribute is necessary.  These will be stored under
+``/field_types`` and each must possess the following attributes:
+
+``field_name``
+   a string that will be used to describe the field; can contain spaces.
+``field_to_cgs``
+   a float that will be used to convert the field to cgs units, if necessary.
+   Set to 1.0 if no conversion necessary.  Note that if non-CGS units are desired
+   this field should simply be viewed as the value by which field values are
+   multiplied to get to some internally consistent unit system.
+``field_units``
+   a string that names the units.
+``staggering``
+   an integer: 0 for cell-centered, 1 for face-centered, 2 for vertex-centered.
+   Non-cell-centered data will be linearly interpolated; more complicated
+   reconstruction will be defined in a future version of this standard; for 1.0
+   we only allow for simple definitions.
+
+Particle Types
+++++++++++++++
+
+Every particle type that is not recognized (i.e., all non-Dark Matter types)
+needs to have an entry under ``/particle_types``.  Each entry must possess the
+following attributes:
+
+``particle_type_name``
+   a string that will be used to describe the field; can contain spaces.
+``particle_use_dataspace``
+   (optional) if 1, the dataspace (see particle field definition above) will be used
+   for all particle fields for this type of particle.  Useful if a given type of particle
+   is embedded inside a larger list of different types of particle.
+``particle_type_num``
+   an integer giving the total number of particles of this type.
+
+For instance, to define a particle of type ``accreting_black_hole``, the file
+must contain ``/particle_types/accreting_black_hole``, with the
+``particle_type_name`` attribute of "Accreting Black Hole".
+
+Particle Field Attributes
++++++++++++++++++++++++++
+
+Every particle type that contains a new field (for instance, ``accretion_rate``)
+needs to have an entry under ``/particle_types/{particle_type_name}/{field_name}``
+containing the following attributes:
+
+``field_name``
+   a string that will be used to describe the field; can contain spaces.
+``field_to_cgs``
+   a float that will be used to convert the field to cgs units, if necessary.
+   Set to 1.0 if no conversion necessary.
+``field_units``
+   a string that names the units.
+
+Role of YT
+----------
+
+yt will provide a reader for this data, so that any data in this format can be
+used by the code.  Additionally, the names and specifications in this code
+reflect the internal yt data structures.
+
+yt will also provide a writer for this data, which will operate on any existing
+data format.  Provided that a simulation code can read this data, this will
+enable cross-platform comparison.  Furthermore, any external piece of software
+(i.e., Stranger) that implements reading this format will be able to read any
+format of data that yt understands.
+
+Example File
+------------
+
+An example file constructed from the ``RD0005-mine`` dataset is available
+at http://yt.enzotools.org/files/RD0005.gdf .  It is not yet a complete
+conversion, but it is a working proof of concept.  Readers and writers are
+forthcoming.
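
As a non-normative illustration of the layout described above, a minimal single-grid file could be sketched with h5py along these lines; the sizes and values are placeholders, and the required ``/simulation_parameters`` attributes are omitted for brevity::

    import h5py
    import numpy as na

    f = h5py.File("example.gdf", "w")
    g = f.create_group("gridded_data_format")
    g.attrs["format_version"] = na.float32(1.0)
    g.attrs["data_software"] = "example_writer"
    for name in ("data", "simulation_parameters", "field_types", "particle_types"):
        f.create_group(name)

    # One 64^3 unigrid covering the domain, with no particles and no parent.
    f.create_dataset("grid_left_index", data=na.zeros((1, 3), dtype="int64"))
    f.create_dataset("grid_dimensions", data=na.array([[64, 64, 64]], dtype="int64"))
    f.create_dataset("grid_level", data=na.zeros(1, dtype="int64"))
    f.create_dataset("grid_particle_count", data=na.zeros((1, 1), dtype="int64"))
    f.create_dataset("grid_parent_id", data=na.array([-1], dtype="int64"))

    # A single cell-centered fluid field, already in cgs.
    grid = f["data"].create_group("grid_%010i" % 0)
    grid.create_dataset("density", data=na.ones((64, 64, 64), dtype="float64"))
    ft = f["field_types"].create_group("density")
    ft.attrs["field_name"] = "Density"
    ft.attrs["field_to_cgs"] = na.float64(1.0)
    ft.attrs["field_units"] = "g/cc"
    ft.attrs["staggering"] = na.int64(0)
    f.close()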


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/utilities/grid_data_format/scripts/convert_distributed_athena.py
--- /dev/null
+++ b/yt/utilities/grid_data_format/scripts/convert_distributed_athena.py
@@ -0,0 +1,22 @@
+from grid_data_format import *
+import sys
+# Assumes that last input is the basename for the athena dataset.
+# i.e. kh_3d_mhd_hlld_128_beta5000_sub_tanhd.0030
+basename = sys.argv[-1]
+converter = AthenaDistributedConverter(basename)
+converter.convert()
+
+# If you have units information, set up a conversion dictionary for
+# each field.  Each key is the name of the field that Athena uses.
+# Each value is what you have to multiply the raw output from Athena
+# by to get cgs units.
+
+# code_to_cgs = {'density':1.0e3,
+# 	       'total_energy':1.0e-3,
+# 	       'velocity_x':1.2345,
+# 	       'velocity_y':1.2345,
+# 	       'velocity_z':1.2345}
+
+# converter = AthenaDistributedConverter(basename, field_conversions=code_to_cgs)
+# converter.convert()
+


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/utilities/grid_data_format/scripts/convert_single_athena.py
--- /dev/null
+++ b/yt/utilities/grid_data_format/scripts/convert_single_athena.py
@@ -0,0 +1,23 @@
+from grid_data_format import *
+import sys
+# Assumes that last input is the basename for the athena dataset.
+# i.e. kh_3d_mhd_hlld_128_beta5000_sub_tanhd.0030
+basename = sys.argv[-1]
+converter = AthenaConverter(basename)
+converter.convert()
+
+# If you have units information, set up a conversion dictionary for
+# each field.  Each key is the name of the field that Athena uses.
+# Each value is what you have to multiply the raw output from Athena
+# by to get cgs units.
+
+# code_to_cgs = {'density':1.0e3,
+# 	       'total_energy':1.0e-3,
+# 	       'velocity_x':1.2345,
+# 	       'velocity_y':1.2345,
+# 	       'velocity_z':1.2345}
+
+# converter = AthenaConverter(basename, field_conversions=code_to_cgs)
+# converter.convert()
+
+


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/utilities/grid_data_format/setup.py
--- /dev/null
+++ b/yt/utilities/grid_data_format/setup.py
@@ -0,0 +1,14 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('grid_data_format', parent_package, top_path)
+    config.add_subpackage("conversion")
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -619,3 +619,16 @@
         # and check, use out array.
         result.append(na.mean(sorted[indexer], axis=axis, out=out))
     return na.array(result)
+
+def get_rotation_matrix(theta, rot_vector):
+    ux = rot_vector[0]
+    uy = rot_vector[1]
+    uz = rot_vector[2]
+    cost = na.cos(theta)
+    sint = na.sin(theta)
+    
+    R = na.array([[cost+ux**2*(1-cost), ux*uy*(1-cost)-uz*sint, ux*uz*(1-cost)+uy*sint],
+                  [uy*ux*(1-cost)+uz*sint, cost+uy**2*(1-cost), uy*uz*(1-cost)-ux*sint],
+                  [uz*ux*(1-cost)-uy*sint, uz*uy*(1-cost)+ux*sint, cost+uz**2*(1-cost)]])
+    
+    return R
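
A quick sanity check of the new helper, assuming the two-argument call signature used by the callers in camera.py (values chosen for illustration)::

    import numpy as na
    from yt.utilities.math_utils import get_rotation_matrix

    # A 90 degree rotation about the z axis should carry x-hat onto y-hat.
    R = get_rotation_matrix(na.pi/2.0, na.array([0.0, 0.0, 1.0]))
    print na.dot(R, na.array([1.0, 0.0, 0.0]))  # approximately [0., 1., 0.]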


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/utilities/minimal_representation.py
--- a/yt/utilities/minimal_representation.py
+++ b/yt/utilities/minimal_representation.py
@@ -58,6 +58,7 @@
             setattr(self, attr, getattr(obj, attr, None))
         if hasattr(obj, "pf"):
             self.output_hash = obj.pf._hash()
+            self._pf_mrep = obj.pf._mrep
 
     def __init__(self, obj):
         self._update_attrs(obj, self._attr_list)
@@ -93,6 +94,8 @@
         api_key = ytcfg.get("yt","hub_api_key")
         url = ytcfg.get("yt","hub_url")
         metadata, (final_name, chunks) = self._generate_post()
+        if hasattr(self, "_pf_mrep"):
+            self._pf_mrep.upload()
         for i in metadata:
             if isinstance(metadata[i], na.ndarray):
                 metadata[i] = metadata[i].tolist()
@@ -110,7 +113,15 @@
                                              'api_key' : api_key})
         request = urllib2.Request(url, datagen, headers)
         # Actually do the request, and get the response
-        rv = urllib2.urlopen(request).read()
+        try:
+            rv = urllib2.urlopen(request).read()
+        except urllib2.HTTPError as ex:
+            if ex.code == 401:
+                mylog.error("You must create an API key before uploading.")
+                mylog.error("https://data.yt-project.org/getting_started.html")
+                return
+            else:
+                raise ex
         uploader_info = json.loads(rv)
         new_url = url + "/handler/%s" % uploader_info['handler_uuid']
         for i, (cn, cv) in enumerate(chunks):
@@ -125,8 +136,9 @@
 
         datagen, headers = multipart_encode({'status' : 'FINAL'})
         request = urllib2.Request(new_url, datagen, headers)
-        rv = urllib2.urlopen(request).read()
-        return json.loads(rv)
+        rv = json.loads(urllib2.urlopen(request).read())
+        mylog.info("Upload succeeded!  View here: %s", rv['url'])
+        return rv
 
 class FilteredRepresentation(MinimalRepresentation):
     def _generate_post(self):
@@ -180,3 +192,25 @@
         chunks = [(fn, d) for fn, d in self.images]
         return (metadata, ('images', chunks))
 
+_hub_categories = ("News", "Documents", "Simulation Management",
+                   "Data Management", "Analysis and Visualization",
+                   "Paper Repositories", "Astrophysical Utilities",
+                   "yt Scripts")
+
+class MinimalProjectDescription(MinimalRepresentation):
+    type = "project"
+    _attr_list = ("title", "url", "description", "category", "image_url")
+
+    def __init__(self, title, url, description,
+                 category, image_url = ""):
+        assert(category in _hub_categories)
+        self.title = title
+        self.url = url
+        self.description = description
+        self.category = category
+        self.image_url = image_url
+
+    def _generate_post(self):
+        metadata = self._attrs
+        chunks = []
+        return (metadata, ("chunks", []))
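
A hypothetical use of the new MinimalProjectDescription might look like the following; the title, URL, and description are made up, and the upload() call assumes the inherited hub-upload hook and a configured hub_api_key::

    from yt.utilities.minimal_representation import MinimalProjectDescription

    mpd = MinimalProjectDescription(
        "Example Halo Scripts",                          # title (made up)
        "https://bitbucket.org/example/halo-scripts",    # url (made up)
        "Halo profiling scripts built on yt.",           # description
        "yt Scripts")                                    # must be one of _hub_categories
    mpd.upload()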


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/utilities/orientation.py
--- /dev/null
+++ b/yt/utilities/orientation.py
@@ -0,0 +1,101 @@
+"""
+A class that manages the coordinate system for orientable data
+containers and cameras.
+
+Author: Nathan Goldbaum <goldbaum at ucolick.org>
+Affiliation: UCSC Astronomy
+License:
+  Copyright (C) 2008-2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as na
+
+from yt.funcs import *
+from yt.utilities.math_utils import get_rotation_matrix
+
+class Orientation:
+    def __init__(self, normal_vector, north_vector=None, steady_north=False):
+        r"""An object that returns a set of basis vectors for orienting
+        cameras a data containers.
+
+        Parameters
+        ----------
+        normal_vector : array_like
+           A vector normal to the image plane
+        north_vector  : array_like, optional
+           The 'up' direction to orient the image plane.  
+           If not specified, gets calculated automatically
+        steady_north  : bool, optional
+           Boolean to control whether to normalize the north_vector
+           by subtracting off its component along the normal vector.
+           Makes it easier to do rotations along a single axis.  If
+           north_vector is specified, this is switched to True.
+           Default: False
+           
+        """
+        self.steady_north = steady_north
+        if na.all(north_vector == normal_vector):
+            mylog.error("North vector and normal vector are the same.  Disregarding north vector.")
+            north_vector = None
+        if north_vector is not None: self.steady_north = True
+        self._setup_normalized_vectors(normal_vector, north_vector)
+
+    def _setup_normalized_vectors(self, normal_vector, north_vector):
+        # Now we set up our various vectors
+        normal_vector /= na.sqrt( na.dot(normal_vector, normal_vector))
+        if north_vector is None:
+            vecs = na.identity(3)
+            t = na.cross(normal_vector, vecs).sum(axis=1)
+            ax = t.argmax()
+            east_vector = na.cross(vecs[ax,:], normal_vector).ravel()
+            north_vector = na.cross(normal_vector, east_vector).ravel()
+        else:
+            if self.steady_north:
+                north_vector = north_vector - na.dot(north_vector,normal_vector)*normal_vector
+            east_vector = na.cross(north_vector, normal_vector).ravel()
+        north_vector /= na.sqrt(na.dot(north_vector, north_vector))
+        east_vector /= na.sqrt(na.dot(east_vector, east_vector))
+        self.normal_vector = normal_vector
+        self.north_vector = north_vector
+        self.unit_vectors = [east_vector, north_vector, normal_vector]
+        self.inv_mat = na.linalg.pinv(self.unit_vectors)
+        
+    def switch_orientation(self, normal_vector=None, north_vector=None):
+        r"""Change the view direction based on any of the orientation parameters.
+
+        This will recalculate all the necessary vectors and vector planes related
+        to an orientable object.
+
+        Parameters
+        ----------
+        normal_vector: array_like, optional
+            The new looking vector.
+        north_vector : array_like, optional
+            The 'up' direction for the plane of rays.  If not specified,
+            calculated automatically.
+        """
+        if north_vector is None:
+            north_vector = self.north_vector
+        if normal_vector is None:
+            normal_vector = self.normal_vector
+        self._setup_normalized_vectors(normal_vector, north_vector)
+
+        
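
A small sketch of how the new Orientation class builds its orthonormal basis, for an assumed view direction (requires yt to be importable)::

    import numpy as na
    from yt.utilities.orientation import Orientation

    orient = Orientation(na.array([1.0, 1.0, 0.5]))   # assumed view direction
    east, north, normal = orient.unit_vectors
    # The three basis vectors are unit length and mutually orthogonal.
    print na.dot(east, north), na.dot(north, normal), na.dot(east, normal)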


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -288,7 +288,7 @@
         if size is None:
             size = len(self.available_ranks)
         if len(self.available_ranks) < size:
-            print 'Not enough resources available'
+            print 'Not enough resources available', size, self.available_ranks
             raise RuntimeError
         if ranks is None:
             ranks = [self.available_ranks.pop(0) for i in range(size)]
@@ -315,15 +315,34 @@
         for wg in self.workgroups:
             self.free_workgroup(wg)
 
+    @classmethod
+    def from_sizes(cls, sizes):
+        sizes = ensure_list(sizes)
+        pool = cls()
+        rank = pool.comm.rank
+        for i,size in enumerate(sizes):
+            if iterable(size):
+                size, name = size
+            else:
+                name = "workgroup_%02i" % i
+            pool.add_workgroup(size, name = name)
+        for wg in pool.workgroups:
+            if rank in wg.ranks: workgroup = wg
+        return pool, workgroup
+
+    def __getitem__(self, key):
+        for wg in self.workgroups:
+            if wg.name == key: return wg
+        raise KeyError(key)
+
 class ResultsStorage(object):
     slots = ['result', 'result_id']
     result = None
     result_id = None
 
-def parallel_objects(objects, njobs, storage = None):
+def parallel_objects(objects, njobs = 0, storage = None, barrier = True):
     if not parallel_capable:
         njobs = 1
-        mylog.warn("parallel_objects() is being used when parallel_capable is false. The loop is not being run in parallel. This may not be what was expected.")
     my_communicator = communication_system.communicators[-1]
     my_size = my_communicator.size
     if njobs <= 0:
@@ -362,6 +381,8 @@
         new_storage = my_communicator.par_combine_object(
                 to_share, datatype = 'dict', op = 'join')
         storage.update(new_storage)
+    if barrier:
+        my_communicator.barrier()
 
 class CommunicationSystem(object):
     communicators = []
@@ -395,6 +416,9 @@
         self.communicators.pop()
         self._update_parallel_state(self.communicators[-1])
 
+def _reconstruct_communicator():
+    return communication_system.communicators[-1]
+
 class Communicator(object):
     comm = None
     _grids = None
@@ -409,6 +433,11 @@
     functions for analyzing something in parallel.
     """
 
+    def __reduce__(self):
+        # We don't try to reconstruct any of the properties of the communicator
+        # or the processors.  In general, we don't want to.
+        return (_reconstruct_communicator, ())
+
     def barrier(self):
         if not self._distributed: return
         mylog.debug("Opening MPI Barrier on %s", self.comm.rank)
@@ -507,29 +536,30 @@
         raise NotImplementedError
 
     @parallel_passthrough
-    def mpi_bcast(self, data):
+    def mpi_bcast(self, data, root = 0):
         # The second check below makes sure that we know how to communicate
         # this type of array. Otherwise, we'll pickle it.
         if isinstance(data, na.ndarray) and \
                 get_mpi_type(data.dtype) is not None:
-            if self.comm.rank == 0:
+            if self.comm.rank == root:
                 info = (data.shape, data.dtype)
             else:
                 info = ()
-            info = self.comm.bcast(info, root=0)
-            if self.comm.rank != 0:
+            info = self.comm.bcast(info, root=root)
+            if self.comm.rank != root:
                 data = na.empty(info[0], dtype=info[1])
             mpi_type = get_mpi_type(info[1])
-            self.comm.Bcast([data, mpi_type], root = 0)
+            self.comm.Bcast([data, mpi_type], root = root)
             return data
         else:
             # Use pickled methods.
-            data = self.comm.bcast(data, root = 0)
+            data = self.comm.bcast(data, root = root)
             return data
 
     def preload(self, grids, fields, io_handler):
         # This will preload if it detects we are parallel capable and
         # if so, we load *everything* that we need.  Use with some care.
+        if len(fields) == 0: return
         mylog.debug("Preloading %s from %s grids", fields, len(grids))
         if not self._distributed: return
         io_handler.preload(grids, fields)
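
A sketch of how the new ProcessorPool.from_sizes classmethod and the name lookup might be used; the workgroup sizes and names are assumptions, and this only makes sense in a script launched under MPI with four ranks and parallelism enabled::

    from yt.utilities.parallel_tools.parallel_analysis_interface import \
        ProcessorPool

    # Split four MPI ranks into two named workgroups of two ranks each.
    pool, my_workgroup = ProcessorPool.from_sizes([(2, "render"), (2, "analysis")])
    print my_workgroup.name       # the workgroup containing this rank
    print pool["render"].ranks    # workgroups can now be looked up by name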


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/utilities/setup.py
--- a/yt/utilities/setup.py
+++ b/yt/utilities/setup.py
@@ -52,6 +52,7 @@
     config.add_subpackage("kdtree")
     config.add_data_files(('kdtree', ['kdtree/fKDpy.so']))
     config.add_subpackage("spatial")
+    config.add_subpackage("grid_data_format")
     config.add_subpackage("parallel_tools")
     config.add_subpackage("_amr_utils")
     config.add_extension("data_point_utilities",


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -374,6 +374,6 @@
                                self.data_source.center, self.data_source._inv_mat, indices,
                                self.data_source[item],
                                self.buff_size[0], self.buff_size[1],
-                               self.bounds).transpose()
+                               self.bounds)
         self[item] = buff
         return buff




diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/visualization/plot_collection.py
--- a/yt/visualization/plot_collection.py
+++ b/yt/visualization/plot_collection.py
@@ -24,7 +24,10 @@
 """
 
 from matplotlib import figure
+import shutil
+import tempfile
 import numpy as na
+import os
 
 from yt.funcs import *
 
@@ -47,6 +50,8 @@
     PhasePlot, \
     LineQueryPlot, \
     ScatterPlot
+from yt.utilities.minimal_representation import \
+    MinimalImageCollectionData
 
 # No better place to put this
 def concatenate_pdfs(output_fn, input_fns):
@@ -60,6 +65,18 @@
 def _fix_axis(axis):
     return inv_axis_names.get(axis, axis)
 
+
+class ImageCollection(object):
+    def __init__(self, pf, name):
+        self.pf = pf
+        self.name = name
+        self.images = []
+        self.image_metadata = []
+
+    def add_image(self, fn, descr):
+        self.image_metadata.append(descr)
+        self.images.append((os.path.basename(fn), na.fromfile(fn, dtype='c')))
+
 class PlotCollection(object):
     __id_counter = 0
     def __init__(self, pf, center=None):
@@ -117,6 +134,19 @@
         for p in self.plots:
             yield p
 
+    @property
+    def _mrep(self):
+        ic = ImageCollection(self.pf, "Plot Collection with center %s" % self.c)
+        dd = tempfile.mkdtemp()
+        fns = self.save(os.path.join(dd, "temp"))
+        for fn, p in zip(fns, self.plots):
+            ic.add_image(fn, p._pretty_name())
+        shutil.rmtree(dd)
+        return MinimalImageCollectionData(ic)
+
+    def hub_upload(self):
+        self._mrep.upload()
+
     def save(self, basename=None, format="png", override=False, force_save=False):
         r"""Save out all the plots hanging off this plot collection, using
         generated names.
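
A hypothetical script using the new hub_upload() hook; the dataset path and field are assumptions, and uploading requires the hub_api_key option in the [yt] section of the configuration::

    from yt.mods import load, PlotCollection

    pf = load("RD0005-mine/RedshiftOutput0005")   # dataset path is an assumption
    pc = PlotCollection(pf, center=[0.5, 0.5, 0.5])
    pc.add_slice("Density", 0)
    pc.hub_upload()   # renders to a temporary directory, then posts the images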


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -935,14 +935,15 @@
     _descriptor = None
     def __init__(self, width, p_size=1.0, col='k', marker='o', stride=1.0,
                  ptype=None, stars_only=False, dm_only=False,
-                 minimum_mass=None):
+                 minimum_mass=None, alpha=1.0):
         """
         Adds particle positions, based on a thick slab along *axis* with a
         *width* along the line of sight.  *p_size* controls the number of
         pixels per particle, and *col* governs the color.  *ptype* will
         restrict plotted particles to only those that are of a given type.
         *minimum_mass* will require that the particles be of a given mass,
-        calculated via ParticleMassMsun, to be plotted.
+        calculated via ParticleMassMsun, to be plotted. *alpha* determines
+        each particle's opacity.
         """
         PlotCallback.__init__(self)
         self.width = width
@@ -954,6 +955,7 @@
         self.stars_only = stars_only
         self.dm_only = dm_only
         self.minimum_mass = minimum_mass
+        self.alpha = alpha
 
     def __call__(self, plot):
         data = plot.data
@@ -984,7 +986,7 @@
                     [reg[field_x][gg][::self.stride],
                      reg[field_y][gg][::self.stride]])
         plot._axes.scatter(px, py, edgecolors='None', marker=self.marker,
-                           s=self.p_size, c=self.color)
+                           s=self.p_size, c=self.color, alpha=self.alpha)
         plot._axes.set_xlim(xx0,xx1)
         plot._axes.set_ylim(yy0,yy1)
         plot._axes.hold(False)


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/visualization/plot_types.py
--- a/yt/visualization/plot_types.py
+++ b/yt/visualization/plot_types.py
@@ -295,6 +295,17 @@
             if not hasattr(c, '_type_name'): continue
             self.modify[c._type_name] = c
 
+    def _pretty_name(self):
+        width = self.im.get("Width", "NA")
+        unit = self.im.get("Unit", "NA")
+        field = self.axis_names.get("Z", self.axis_names.get("Field1"))
+        if hasattr(self.data, "_data_source"):
+            data = self.data._data_source
+        else:
+            data = self.data
+        return "%s: %s (%s %s) %s" % (self._type_name,
+            field, width, unit, data)
+
 class VMPlot(RavenPlot):
     _antialias = True
     _period = (0.0, 0.0)
@@ -493,6 +504,7 @@
         if self.colorbar != None:
             self.colorbar.set_label(str(data_label), **self.label_kws)
 
+
 class FixedResolutionPlot(VMPlot):
 
     # This is a great argument in favor of changing the name


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/visualization/volume_rendering/api.py
--- a/yt/visualization/volume_rendering/api.py
+++ b/yt/visualization/volume_rendering/api.py
@@ -37,6 +37,7 @@
                              import_partitioned_grids
 from image_handling import export_rgba, import_rgba, \
                            plot_channel, plot_rgb
+
 from camera import Camera, PerspectiveCamera, StereoPairCamera, \
     off_axis_projection, FisheyeCamera, MosaicFisheyeCamera, \
     HEALpixCamera, InteractiveCamera, ProjectionCamera


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -31,15 +31,16 @@
 from .grid_partitioner import HomogenizedVolume
 from .transfer_functions import ProjectionTransferFunction
 
-#from yt.utilities.amr_utils import \
-#    arr_vec2pix_nest, arr_pix2vec_nest, AdaptiveRaySource, \
-#    arr_ang2pix_nest
+from yt.utilities.amr_utils import TransferFunctionProxy, VectorPlane, \
+    arr_vec2pix_nest, arr_pix2vec_nest, AdaptiveRaySource, \
+    arr_ang2pix_nest, arr_fisheye_vectors, rotate_vectors
+from yt.utilities.math_utils import get_rotation_matrix
+from yt.utilities.orientation import Orientation
 from yt.visualization.image_writer import write_bitmap, write_image
 from yt.data_objects.data_containers import data_object_registry
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface, ProcessorPool
 from yt.utilities.amr_kdtree.api import AMRKDTree
-from numpy import pi
 
 from yt.utilities.amr_utils import \
     PartitionedGrid, ProjectionSampler, VolumeRenderSampler, \
@@ -81,7 +82,7 @@
             Boolean to control whether to normalize the north_vector
             by subtracting off the dot product of it and the normal
             vector.  Makes it easier to do rotations along a single
-            axis.  If north_vector is specifies, is switched to
+            axis.  If north_vector is specified, is switched to
             True. Default: False
         volume : `yt.extensions.volume_rendering.HomogenizedVolume`, optional
             The volume to ray cast through.  Can be specified for finer-grained
@@ -143,12 +144,6 @@
             prone to longer data IO times.  If all the data can fit in
             memory on each cpu, this can be the fastest option for
             multiple ray casts on the same dataset.
-        expand_factor: float, optional
-            A parameter to be used with the PerspectiveCamera.
-            Controls how much larger a volume to render, which is
-            currently difficult to gauge for the PerspectiveCamera.
-            For full box renders, values in the 2.0-3.0 range seem to
-            produce desirable results. Default: 1.0
         le: array_like, optional
             Specifies the left edge of the volume to be rendered.
             Currently only works with use_kd=True.
@@ -195,23 +190,13 @@
         self.sub_samples = sub_samples
         if not iterable(width):
             width = (width, width, width) # left/right, top/bottom, front/back 
-        self.width = width
-        self.center = center
-        self.steady_north = steady_north
-        self.expand_factor = expand_factor
-        # This seems to be necessary for now.  Not sure what goes wrong when not true.
-        if na.all(north_vector == normal_vector):
-            mylog.error("North vector and normal vector are the same.  Disregarding north vector.")
-            north_vector == None
-        if north_vector is not None: self.steady_north=True
-        self.north_vector = north_vector
-        self.rotation_vector = north_vector
+        self.orienter = Orientation(normal_vector, north_vector=north_vector, steady_north=steady_north)
+        self._setup_box_properties(width, center, self.orienter.unit_vectors)
         if fields is None: fields = ["Density"]
         self.fields = fields
         if transfer_function is None:
             transfer_function = ProjectionTransferFunction()
         self.transfer_function = transfer_function
-        self._setup_normalized_vectors(normal_vector, north_vector)
         self.log_fields = log_fields
         self.use_kd = use_kd
         self.l_max = l_max
@@ -233,40 +218,21 @@
             self.use_kd = isinstance(volume, AMRKDTree)
         self.volume = volume
 
-    def _setup_normalized_vectors(self, normal_vector, north_vector):
-        # Now we set up our various vectors
-        normal_vector /= na.sqrt( na.dot(normal_vector, normal_vector))
-        if north_vector is None:
-            vecs = na.identity(3)
-            t = na.cross(normal_vector, vecs).sum(axis=1)
-            ax = t.argmax()
-            north_vector = na.cross(vecs[ax,:], normal_vector).ravel()
-            if self.rotation_vector is None:
-                self.rotation_vector=north_vector
-        else:
-            if self.steady_north:
-                north_vector = north_vector - na.dot(north_vector,normal_vector)*normal_vector
-        north_vector /= na.sqrt(na.dot(north_vector, north_vector))
-        east_vector = -na.cross(north_vector, normal_vector).ravel()
-        east_vector /= na.sqrt(na.dot(east_vector, east_vector))
-        self.normal_vector = normal_vector
-        self.unit_vectors = [east_vector, north_vector, normal_vector]
-        self.box_vectors = na.array([self.unit_vectors[0]*self.width[0],
-                                     self.unit_vectors[1]*self.width[1],
-                                     self.unit_vectors[2]*self.width[2]])
-
-        self.origin = self.center - 0.5*self.width[0]*self.unit_vectors[0] \
-                                  - 0.5*self.width[1]*self.unit_vectors[1] \
-                                  - 0.5*self.width[2]*self.unit_vectors[2]
-        self.back_center = self.center - 0.5*self.width[2]*self.unit_vectors[2]
-        self.front_center = self.center + 0.5*self.width[2]*self.unit_vectors[2]
-        self.inv_mat = na.linalg.pinv(self.unit_vectors)
+    def _setup_box_properties(self, width, center, unit_vectors):
+        self.width = width
+        self.center = center
+        self.box_vectors = na.array([unit_vectors[0]*width[0],
+                                     unit_vectors[1]*width[1],
+                                     unit_vectors[2]*width[2]])
+        self.origin = center - 0.5*na.dot(width,unit_vectors)
+        self.back_center = center - 0.5*width[2]*unit_vectors[2]
+        self.front_center = center + 0.5*width[2]*unit_vectors[2]
 
     def look_at(self, new_center, north_vector = None):
         r"""Change the view direction based on a new focal point.
 
-        This will recalculate all the necessary vectors and vector planes related
-        to a camera to point at a new location.
+        This will recalculate all the necessary vectors and vector planes to orient
+        the image plane so that it points at a new location.
 
         Parameters
         ----------
@@ -278,13 +244,14 @@
             calculated automatically.
         """
         normal_vector = self.front_center - new_center
-        self._setup_normalized_vectors(normal_vector, north_vector)
+        self.orienter.switch_orientation(normal_vector=normal_vector,
+                                         north_vector = north_vector)
 
     def switch_view(self, normal_vector=None, width=None, center=None, north_vector=None):
-        r"""Change the view direction based on any of the view parameters.
+        r"""Change the view based on any of the view parameters.
 
-        This will recalculate all the necessary vectors and vector planes related
-        to a camera with new normal vectors, widths, centers, or north vectors.
+        This will recalculate the orientation and width based on any of
+        normal_vector, width, center, and north_vector.
 
         Parameters
         ----------
@@ -307,15 +274,16 @@
         if center is not None:
             self.center = center
         if north_vector is None:
-            north_vector = self.north_vector
+            north_vector = self.orienter.north_vector
         if normal_vector is None:
-            normal_vector = self.front_center-self.center
-        self._setup_normalized_vectors(normal_vector, north_vector)
-
+            normal_vector = self.front_center - self.center
+        self.orienter.switch_orientation(normal_vector = normal_vector,
+                                         north_vector = north_vector)
+        self._setup_box_properties(self.width, self.center, self.orienter.unit_vectors)
     def new_image(self):
         image = na.zeros((self.resolution[0], self.resolution[1], 3), dtype='float64', order='C')
         return image
-        
+
     def get_sampler_args(self, image):
         rotp = na.concatenate([self.inv_mat.ravel('F'), self.back_center.ravel()])
         args = (rotp, self.box_vectors[2], self.back_center,
@@ -353,13 +321,13 @@
                 for data in brick.my_data:
                     if na.any(na.isnan(data)):
                         raise RuntimeError
-        
+
         view_pos = self.front_center + self.unit_vectors[2] * 1.0e6 * self.width[2]
         for brick in self.volume.traverse(view_pos, self.front_center, image):
             sampler(brick, num_threads=num_threads)
             total_cells += na.prod(brick.my_data[0].shape)
             pbar.update(total_cells)
-        
+
         pbar.finish()
         image = sampler.aimage
         self.finalize_image(image)
@@ -437,8 +405,7 @@
 
         """
         self.width = [w / factor for w in self.width]
-        self._setup_normalized_vectors(
-                self.unit_vectors[2], self.unit_vectors[1])
+        self._setup_box_properties(self.width, self.center, self.orienter.unit_vectors)
 
     def zoomin(self, final, n_steps, clip_ratio = None):
         r"""Loop over a zoomin and return snapshots along the way.
@@ -551,15 +518,7 @@
         if rot_vector is None:
             rot_vector = self.rotation_vector
             
-        ux = rot_vector[0]
-        uy = rot_vector[1]
-        uz = rot_vector[2]
-        cost = na.cos(theta)
-        sint = na.sin(theta)
-        
-        R = na.array([[cost+ux**2*(1-cost), ux*uy*(1-cost)-uz*sint, ux*uz*(1-cost)+uy*sint],
-                      [uy*ux*(1-cost)+uz*sint, cost+uy**2*(1-cost), uy*uz*(1-cost)-ux*sint],
-                      [uz*ux*(1-cost)-uy*sint, uz*uy*(1-cost)+ux*sint, cost+uz**2*(1-cost)]])
+        R = get_rotation_matrix(theta, rot_vector)
 
         normal_vector = self.front_center-self.center
 
@@ -608,8 +567,7 @@
                  log_fields = None,
                  sub_samples = 5, pf = None,
                  use_kd=True, l_max=None, no_ghost=True,
-                 tree_type='domain',expand_factor=1.0,
-                 le=None, re=None):
+                 tree_type='domain',le=None, re=None):
         self.frames = []
         Camera.__init__(self, center, normal_vector, width,
                  resolution, transfer_function,
@@ -618,8 +576,7 @@
                  log_fields = log_fields,
                  sub_samples = sub_samples, pf = pf,
                  use_kd=use_kd, l_max=l_max, no_ghost=no_ghost,
-                 tree_type=tree_type,expand_factor=expand_factor,
-                 le=le, re=re)
+                 tree_type=tree_type,le=le, re=re)
 
     def snapshot(self, fn = None, clip_ratio = None):
         import matplotlib
@@ -657,7 +614,6 @@
 data_object_registry["interactive_camera"] = InteractiveCamera
 
 class PerspectiveCamera(Camera):
-    
     def get_sampler_args(self, image):
         # We should move away from pre-generation of vectors like this and into
         # the usage of on-the-fly generation in the VolumeIntegrator module
@@ -665,24 +621,24 @@
         dl = (self.back_center - self.front_center)
         self.front_center += self.expand_factor*dl
         self.back_center -= dl
-        
+
         px = na.linspace(-self.width[0]/2.0, self.width[0]/2.0,
                          self.resolution[0])[:,None]
         py = na.linspace(-self.width[1]/2.0, self.width[1]/2.0,
                          self.resolution[1])[None,:]
-        inv_mat = self.inv_mat
+        inv_mat = self.orienter.inv_mat
         positions = na.zeros((self.resolution[0], self.resolution[1], 3),
                           dtype='float64', order='C')
         positions[:,:,0] = inv_mat[0,0]*px+inv_mat[0,1]*py+self.back_center[0]
         positions[:,:,1] = inv_mat[1,0]*px+inv_mat[1,1]*py+self.back_center[1]
         positions[:,:,2] = inv_mat[2,0]*px+inv_mat[2,1]*py+self.back_center[2]
         bounds = (px.min(), px.max(), py.min(), py.max())
-        
+
         # We are likely adding on an odd cutting condition here
         vectors = self.front_center - positions
         positions = self.front_center - 1.0*(((self.back_center-self.front_center)**2).sum())**0.5*vectors
         vectors = (self.front_center - positions)
-        
+
         uv = na.ones(3, dtype='float64')
         image.shape = (self.resolution[0]**2,1,3)
         vectors.shape = (self.resolution[0]**2,1,3)
@@ -1191,7 +1147,7 @@
             rot_vector = na.cross(rvec, self.normal_vector)
             rot_vector /= (rot_vector**2).sum()**0.5
             
-            self.rotation_matrix = self.get_rotation_matrix(angle,rot_vector)
+            self.rotation_matrix = get_rotation_matrix(angle,rot_vector)
             self.normal_vector = na.dot(self.rotation_matrix,self.normal_vector)
             self.north_vector = na.dot(self.rotation_matrix,self.north_vector)
             self.east_vector = na.dot(self.rotation_matrix,self.east_vector)
@@ -1305,10 +1261,10 @@
         """
         if rot_vector is None:
             rot_vector = self.north_vector
-        
+
         dist = ((self.focal_center - self.center)**2).sum()**0.5
-        
-        R = self.get_rotation_matrix(theta, rot_vector)
+
+        R = get_rotation_matrix(theta, rot_vector)
 
         self.vp = rotate_vectors(self.vp, R)
         self.normal_vector = na.dot(R,self.normal_vector)
@@ -1362,14 +1318,13 @@
         exponential : boolean
             Specifies whether the move/zoom transition follows an
             exponential path toward the destination or linear
-            
+
         Examples
         --------
 
         >>> for i, snapshot in enumerate(cam.move_to([0.2,0.3,0.6], 10)):
-        ...     cam.save_image("move_%04i.png" % i)
+        ...     cam.save_image('move_%04i.png' % i)
         """
-
         if exponential:
             position_diff = (na.array(final)/self.center)*1.0
             dx = position_diff**(1.0/n_steps)
@@ -1382,20 +1337,6 @@
                 self.center += dx
             yield self.snapshot()
 
-    def get_rotation_matrix(self, theta, rot_vector):
-        ux = rot_vector[0]
-        uy = rot_vector[1]
-        uz = rot_vector[2]
-        cost = na.cos(theta)
-        sint = na.sin(theta)
-        
-        R = na.array([[cost+ux**2*(1-cost), ux*uy*(1-cost)-uz*sint, ux*uz*(1-cost)+uy*sint],
-                      [uy*ux*(1-cost)+uz*sint, cost+uy**2*(1-cost), uy*uz*(1-cost)-ux*sint],
-                      [uz*ux*(1-cost)-uy*sint, uz*uy*(1-cost)+ux*sint, cost+uz**2*(1-cost)]])
-
-        return R
-
-
 def off_axis_projection(pf, center, normal_vector, width, resolution,
                         field, weight = None, num_threads = 0):
     r"""Project through a parameter file, off-axis, and return the image plane.
@@ -1411,7 +1352,7 @@
     pf : `~yt.data_objects.api.StaticOutput`
         This is the parameter file to volume render.
     center : array_like
-        The current "center" of the view port -- the focal point for the
+        The current 'center' of the view port -- the focal point for the
         camera.
     normal_vector : array_like
         The vector between the camera position and the center.
@@ -1443,16 +1384,16 @@
     # We manually modify the ProjectionTransferFunction to get it to work the
     # way we want, with a second field that's also passed through.
     fields = [field]
-    
+
     if weight is not None:
         # This is a temporary field, which we will remove at the end.
-        def _make_wf(f, w):
-            def temp_weightfield(a, b):
-                tr = b[f].astype("float64") * b[w]
-                return tr
-            return temp_weightfield
+        def _wf(f1, w1):
+            def WeightField(field, data):
+                return data[f1].astype("float64") * \
+                       data[w1].astype("float64")
+            return WeightField
         pf.field_info.add_field("temp_weightfield",
-            function=_make_wf(field, weight))
+                    function=_wf(field, weight))
         fields = ["temp_weightfield", weight]
     image = na.zeros((resolution, resolution, 3), dtype='float64',
                       order='C')
@@ -1599,7 +1540,10 @@
     else:
         image[:,:,0] /= image[:,:,1]
         pf.field_info.pop("temp_weightfield")
-    return image[:,0,0]
+        for g in pf.h.grids:
+            if "temp_weightfield" in g.keys():
+                del g["temp_weightfield"]
+    return image
 
 def plot_allsky_healpix(image, nside, fn, label = "", rotation = None,
                         take_log = True, resolution=512):


diff -r 2878e15dc70e915c8ee202ff84df321a51f2c3c0 -r 88016491ea7599f84670597400b8152073820d0a yt/visualization/volume_rendering/grid_partitioner.py
--- a/yt/visualization/volume_rendering/grid_partitioner.py
+++ b/yt/visualization/volume_rendering/grid_partitioner.py
@@ -41,7 +41,8 @@
 class HomogenizedVolume(ParallelAnalysisInterface):
     bricks = None
     def __init__(self, fields = "Density", source = None, pf = None,
-                 log_fields = None, no_ghost = False):
+                 log_fields = None, no_ghost = False,
+                 max_level = 48):
         # Typically, initialized as hanging off a hierarchy.  But, not always.
         ParallelAnalysisInterface.__init__(self)
         self.no_ghost = no_ghost
@@ -54,6 +55,7 @@
         else:
             log_fields = [self.pf.field_info[field].take_log
                          for field in self.fields]
+        self.max_level = max_level
         self.log_fields = log_fields
 
     def traverse(self, back_point, front_point, image):
@@ -84,8 +86,13 @@
         PP = ProtoPrism(grid.id, grid.LeftEdge, grid.RightEdge, GF)
 
         pgs = []
+        cm = grid.child_mask.copy()
+        if grid.Level > self.max_level:
+            return pgs
+        elif grid.Level == self.max_level:
+            cm[:] = 1
         for P in PP.sweep(0):
-            sl = P.get_brick(grid.LeftEdge, grid.dds, grid.child_mask)
+            sl = P.get_brick(grid.LeftEdge, grid.dds, cm)
             if len(sl) == 0: continue
             dd = [d[sl[0][0]:sl[0][1]+1,
                     sl[1][0]:sl[1][1]+1,



https://bitbucket.org/yt_analysis/yt/changeset/dd9da6d9e057/
changeset:   dd9da6d9e057
branch:      yt
user:        samskillman
date:        2012-06-05 17:28:51
summary:     Fixing a few imports, and changing self.unit_vectors to self.orienter.unit_vectors
affected #:  1 file
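
For context, a minimal sketch of what this attribute move means for user code, assuming `cam` is an existing Camera instance (the variable names below are illustrative, not part of this diff):

    # Before this refactor the basis vectors hung directly off the camera:
    #   north, east, normal = cam.unit_vectors
    # After it, they live on the Orientation helper:
    north, east, normal = cam.orienter.unit_vectors
    inv_mat = cam.orienter.inv_mat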

diff -r 88016491ea7599f84670597400b8152073820d0a -r dd9da6d9e0576f876d4c3dd998ad43ae97d1fceb yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -31,9 +31,9 @@
 from .grid_partitioner import HomogenizedVolume
 from .transfer_functions import ProjectionTransferFunction
 
-from yt.utilities.amr_utils import TransferFunctionProxy, VectorPlane, \
-    arr_vec2pix_nest, arr_pix2vec_nest, AdaptiveRaySource, \
-    arr_ang2pix_nest, arr_fisheye_vectors, rotate_vectors
+from yt.utilities.amr_utils import \
+    arr_vec2pix_nest, arr_pix2vec_nest, \
+    arr_ang2pix_nest, arr_fisheye_vectors
 from yt.utilities.math_utils import get_rotation_matrix
 from yt.utilities.orientation import Orientation
 from yt.visualization.image_writer import write_bitmap, write_image
@@ -285,11 +285,11 @@
         return image
 
     def get_sampler_args(self, image):
-        rotp = na.concatenate([self.inv_mat.ravel('F'), self.back_center.ravel()])
+        rotp = na.concatenate([self.orienter.inv_mat.ravel('F'), self.back_center.ravel()])
         args = (rotp, self.box_vectors[2], self.back_center,
                 (-self.width[0]/2.0, self.width[0]/2.0,
                  -self.width[1]/2.0, self.width[1]/2.0),
-                image, self.unit_vectors[0], self.unit_vectors[1],
+                image, self.orienter.unit_vectors[0], self.orienter.unit_vectors[1],
                 na.array(self.width),
                 self.transfer_function, self.sub_samples)
         return args
@@ -299,9 +299,9 @@
             if self.light_dir is None:
                 self.set_default_light_dir()
             temp_dir = na.empty(3,dtype='float64')
-            temp_dir = self.light_dir[0] * self.unit_vectors[1] + \
-                    self.light_dir[1] * self.unit_vectors[2] + \
-                    self.light_dir[2] * self.unit_vectors[0]
+            temp_dir = self.light_dir[0] * self.orienter.unit_vectors[1] + \
+                    self.light_dir[1] * self.orienter.unit_vectors[2] + \
+                    self.light_dir[2] * self.orienter.unit_vectors[0]
             if self.light_rgba is None:
                 self.set_default_light_rgba()
             sampler = LightSourceRenderSampler(*args, light_dir=temp_dir,
@@ -322,7 +322,7 @@
                     if na.any(na.isnan(data)):
                         raise RuntimeError
 
-        view_pos = self.front_center + self.unit_vectors[2] * 1.0e6 * self.width[2]
+        view_pos = self.front_center + self.orienter.unit_vectors[2] * 1.0e6 * self.width[2]
         for brick in self.volume.traverse(view_pos, self.front_center, image):
             sampler(brick, num_threads=num_threads)
             total_cells += na.prod(brick.my_data[0].shape)
@@ -855,7 +855,7 @@
 
     def split(self):
         oc = self.original_camera
-        uv = oc.unit_vectors
+        uv = oc.orienter.unit_vectors
         c = oc.center
         fc = oc.front_center
         wx, wy, wz = oc.width
@@ -1587,12 +1587,12 @@
 
     def get_sampler_args(self, image):
         width = self.width[2]
-        north_vector = self.unit_vectors[0]
-        east_vector = self.unit_vectors[1]
-        normal_vector = self.unit_vectors[2]
+        north_vector = self.orienter.unit_vectors[0]
+        east_vector = self.orienter.unit_vectors[1]
+        normal_vector = self.orienter.unit_vectors[2]
 
         back_center= self.center - 0.5*width * normal_vector
-        rotp = na.concatenate([na.linalg.pinv(self.unit_vectors).ravel('F'),
+        rotp = na.concatenate([na.linalg.pinv(self.orienter.unit_vectors).ravel('F'),
                                back_center])
 
         args = (rotp, normal_vector * width, back_center,
@@ -1617,9 +1617,9 @@
         # Back corners ...
         pf = self.pf
         width = self.width[2]
-        north_vector = self.unit_vectors[0]
-        east_vector = self.unit_vectors[1]
-        normal_vector = self.unit_vectors[2]
+        north_vector = self.orienter.unit_vectors[0]
+        east_vector = self.orienter.unit_vectors[1]
+        normal_vector = self.orienter.unit_vectors[2]
         fields = self.fields
 
         mi = pf.domain_right_edge.copy()



https://bitbucket.org/yt_analysis/yt/changeset/703f14e325b3/
changeset:   703f14e325b3
branch:      yt
user:        samskillman
date:        2012-06-05 18:01:10
summary:     Fixing the PerspectiveCamera
affected #:  1 file

diff -r dd9da6d9e0576f876d4c3dd998ad43ae97d1fceb -r 703f14e325b3d1f2db09362658fe6666a25742b5 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -56,7 +56,7 @@
                  log_fields = None,
                  sub_samples = 5, pf = None,
                  use_kd=True, l_max=None, no_ghost=True,
-                 tree_type='domain',expand_factor=1.0,
+                 tree_type='domain',
                  le=None, re=None, use_light=False):
         r"""A viewpoint into a volume, for volume rendering.
 
@@ -614,6 +614,25 @@
 data_object_registry["interactive_camera"] = InteractiveCamera
 
 class PerspectiveCamera(Camera):
+    def __init__(self, center, normal_vector, width,
+                 resolution, transfer_function,
+                 north_vector = None, steady_north=False,
+                 volume = None, fields = None,
+                 log_fields = None,
+                 sub_samples = 5, pf = None,
+                 use_kd=True, l_max=None, no_ghost=True,
+                 tree_type='domain', expand_factor=1.0,
+                 le=None, re=None, use_light=False):
+        self.expand_factor = expand_factor
+        Camera.__init__(self, center, normal_vector, width,
+                 resolution, transfer_function,
+                 north_vector=north_vector, steady_north=steady_north,
+                 volume=volume, fields=fields,
+                 log_fields=log_fields,
+                 sub_samples=sub_samples, pf=pf,
+                 use_kd=use_kd, l_max=l_max, no_ghost=no_ghost,
+                 tree_type=tree_type, le=le, re=re, use_light=use_light)
+
     def get_sampler_args(self, image):
         # We should move away from pre-generation of vectors like this and into
         # the usage of on-the-fly generation in the VolumeIntegrator module



https://bitbucket.org/yt_analysis/yt/changeset/925f23c74fd5/
changeset:   925f23c74fd5
branch:      yt
user:        samskillman
date:        2012-06-05 23:42:35
summary:     Now the alpha value passed to add_gaussian actually matters.
affected #:  1 file
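
A minimal sketch of the effect, assuming ColorTransferFunction is in scope as in the docstring shown in the diff below; the bounds, location, and width are illustrative values only:

    tf = ColorTransferFunction((-10.0, -5.0))
    # With this change the trailing alpha (0.3 here) scales the per-channel
    # Gaussians, so a half-transparent feature really does render fainter.
    tf.add_gaussian(-8.0, 0.05, [1.0, 0.0, 0.0, 0.3])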

diff -r 703f14e325b3d1f2db09362658fe6666a25742b5 -r 925f23c74fd51d94198f164d9b71cf41a2fcd758 yt/visualization/volume_rendering/transfer_functions.py
--- a/yt/visualization/volume_rendering/transfer_functions.py
+++ b/yt/visualization/volume_rendering/transfer_functions.py
@@ -393,8 +393,9 @@
         >>> tf = ColorTransferFunction( (-10.0, -5.0) )
         >>> tf.add_gaussian(-9.0, 0.01, [1.0, 0.0, 0.0, 1.0])
         """
+        alpha = height[3]
         for tf, v in zip(self.funcs, height):
-            tf.add_gaussian(location, width, v)
+            tf.add_gaussian(location, width, alpha*v)
 
     def add_step(self, start, stop, value):
         r"""Adds a step function to the transfer function.



https://bitbucket.org/yt_analysis/yt/changeset/6c061d75d875/
changeset:   6c061d75d875
branch:      yt
user:        samskillman
date:        2012-06-06 23:16:49
summary:     Some fixes to camera rotations from the orientation merge; modified what alpha does for sample_colormap; and added a tf.clear() that zeros out the TF, which is useful for interactive work.
affected #:  2 files
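
A minimal interactive-style sketch of the new clear() method, assuming `tf` is a ColorTransferFunction, `cam` is a Camera that uses it, and sample_colormap accepts the colormap and alpha keywords touched in this changeset; the values and filenames are made up:

    tf.sample_colormap(-8.0, 0.05, colormap="hot", alpha=0.5)
    cam.snapshot("attempt_1.png")
    tf.clear()                    # zero every channel and start over
    tf.sample_colormap(-7.0, 0.05, colormap="hot", alpha=1.0)
    cam.snapshot("attempt_2.png")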

diff -r 925f23c74fd51d94198f164d9b71cf41a2fcd758 -r 6c061d75d875ef1dd38af2075458cca8dca86abf yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -191,6 +191,7 @@
         if not iterable(width):
             width = (width, width, width) # left/right, top/bottom, front/back 
         self.orienter = Orientation(normal_vector, north_vector=north_vector, steady_north=steady_north)
+        self.rotation_vector = self.orienter.north_vector
         self._setup_box_properties(width, center, self.orienter.unit_vectors)
         if fields is None: fields = ["Density"]
         self.fields = fields
@@ -279,7 +280,7 @@
             normal_vector = self.front_cemter - self.center
         self.orienter.switch_orientation(normal_vector = normal_vector,
                                          north_vector = north_vector)
-        self._setup_box_properties(width, center, self.orienter.unit_vectors)
+        self._setup_box_properties(width, self.center, self.orienter.unit_vectors)
     def new_image(self):
         image = na.zeros((self.resolution[0], self.resolution[1], 3), dtype='float64', order='C')
         return image
@@ -517,7 +518,7 @@
         """
         if rot_vector is None:
             rot_vector = self.rotation_vector
-            
+          
         R = get_rotation_matrix(self, theta, rot_vector)
 
         normal_vector = self.front_center-self.center


diff -r 925f23c74fd51d94198f164d9b71cf41a2fcd758 -r 6c061d75d875ef1dd38af2075458cca8dca86abf yt/visualization/volume_rendering/transfer_functions.py
--- a/yt/visualization/volume_rendering/transfer_functions.py
+++ b/yt/visualization/volume_rendering/transfer_functions.py
@@ -222,6 +222,9 @@
         pylab.xlim(*self.x_bounds)
         pylab.ylim(0.0, 1.0)
         pylab.draw()
+        
+    def clear(self):
+        self.y[:]=0.0
 
 class MultiVariateTransferFunction(object):
     def __init__(self):
@@ -395,7 +398,7 @@
         """
         alpha = height[3]
         for tf, v in zip(self.funcs, height):
-            tf.add_gaussian(location, width, alpha*v)
+            tf.add_gaussian(location, width, v)
 
     def add_step(self, start, stop, value):
         r"""Adds a step function to the transfer function.
@@ -557,7 +560,10 @@
         cmap = get_cmap(colormap)
         r,g,b,a = cmap(rel)
         if alpha is None: alpha = a
-        self.add_gaussian(v, w, [r,g,b,alpha])
+        r *= alpha
+        g *= alpha
+        b *= alpha    
+        self.add_gaussian(v, w, [r, g, b, alpha])
         mylog.debug("Adding gaussian at %s with width %s and colors %s" % (
                 v, w, (r,g,b,alpha)))
 
@@ -645,6 +651,11 @@
             image[:,:,i] = (vals[:,None] * 255).astype('uint8')
         image = image[::-1,:,:]
         return image
+            
+    def clear(self):
+        for f in self.funcs:
+            f.clear()
+
 
 class ProjectionTransferFunction(MultiVariateTransferFunction):
     def __init__(self, x_bounds = (-1e60, 1e60), n_fields = 1):



https://bitbucket.org/yt_analysis/yt/changeset/6b8b7931facd/
changeset:   6b8b7931facd
branch:      yt
user:        MatthewTurk
date:        2012-06-07 14:50:22
summary:     Make offset an int64.
affected #:  1 file

diff -r 6c061d75d875ef1dd38af2075458cca8dca86abf -r 6b8b7931facd69bc9f785a10176cb78553b17ba3 yt/utilities/_amr_utils/grid_traversal.pyx
--- a/yt/utilities/_amr_utils/grid_traversal.pyx
+++ b/yt/utilities/_amr_utils/grid_traversal.pyx
@@ -232,7 +232,8 @@
         # This routine will iterate over all of the vectors and cast each in
         # turn.  Might benefit from a more sophisticated intersection check,
         # like http://courses.csusm.edu/cs697exz/ray_box.htm
-        cdef int vi, vj, hit, i, j, ni, nj, nn, offset
+        cdef int vi, vj, hit, i, j, ni, nj, nn
+        cdef np.int64_t offset
         cdef int iter[4]
         cdef VolumeContainer *vc = pg.container
         cdef ImageContainer *im = self.image



https://bitbucket.org/yt_analysis/yt/changeset/1de29bd5b124/
changeset:   1de29bd5b124
branch:      yt
user:        MatthewTurk
date:        2012-06-07 17:19:53
summary:     This simple change seems to fix the overflow issues with tiny little grids and
big viewing planes.
affected #:  2 files
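
For context, a tiny pure-Python illustration (with made-up numbers) of the 32-bit overflow that the new i64clip and int64 counters in the diff below guard against:

    import numpy as na
    nx = ny = na.int32(50000)               # a very large viewing plane
    print nx * ny                            # wraps around in 32 bits
    print na.int64(nx) * na.int64(ny)        # 2500000000, the intended value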

diff -r 6b8b7931facd69bc9f785a10176cb78553b17ba3 -r 1de29bd5b124936d6ab58e67617ccc7c55cffc60 yt/utilities/_amr_utils/fp_utils.pxd
--- a/yt/utilities/_amr_utils/fp_utils.pxd
+++ b/yt/utilities/_amr_utils/fp_utils.pxd
@@ -51,6 +51,11 @@
     if i > b: return b
     return i
 
+cdef inline int i64clip(np.int64_t i, np.int64_t a, np.int64_t b) nogil:
+    if i < a: return a
+    if i > b: return b
+    return i
+
 cdef inline np.float64_t fclip(np.float64_t f,
                       np.float64_t a, np.float64_t b) nogil:
     return fmin(fmax(f, a), b)


diff -r 6b8b7931facd69bc9f785a10176cb78553b17ba3 -r 1de29bd5b124936d6ab58e67617ccc7c55cffc60 yt/utilities/_amr_utils/grid_traversal.pyx
--- a/yt/utilities/_amr_utils/grid_traversal.pyx
+++ b/yt/utilities/_amr_utils/grid_traversal.pyx
@@ -29,7 +29,7 @@
 cimport kdtree_utils
 cimport healpix_interface
 from stdlib cimport malloc, free, abs
-from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip
+from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip, i64clip
 from field_interpolation_tables cimport \
     FieldInterpolationTable, FIT_initialize_table, FIT_eval_transfer,\
     FIT_eval_transfer_with_light
@@ -186,7 +186,7 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    cdef void get_start_stop(self, np.float64_t *ex, int *rv):
+    cdef void get_start_stop(self, np.float64_t *ex, np.int64_t *rv):
         # Extrema need to be re-centered
         cdef np.float64_t cx, cy
         cdef ImageContainer *im = self.image
@@ -233,22 +233,21 @@
         # turn.  Might benefit from a more sophisticated intersection check,
         # like http://courses.csusm.edu/cs697exz/ray_box.htm
         cdef int vi, vj, hit, i, j, ni, nj, nn
-        cdef np.int64_t offset
-        cdef int iter[4]
+        cdef np.int64_t offset, iter[4]
         cdef VolumeContainer *vc = pg.container
         cdef ImageContainer *im = self.image
         self.setup(pg)
         if self.sampler == NULL: raise RuntimeError
         cdef np.float64_t *v_pos, *v_dir, rgba[6], extrema[4]
         hit = 0
-        cdef int nx, ny, size
+        cdef np.int64_t nx, ny, size
         if im.vd_strides[0] == -1:
             self.calculate_extent(extrema, vc)
             self.get_start_stop(extrema, iter)
-            iter[0] = iclip(iter[0]-1, 0, im.nv[0])
-            iter[1] = iclip(iter[1]+1, 0, im.nv[0])
-            iter[2] = iclip(iter[2]-1, 0, im.nv[1])
-            iter[3] = iclip(iter[3]+1, 0, im.nv[1])
+            iter[0] = i64clip(iter[0]-1, 0, im.nv[0])
+            iter[1] = i64clip(iter[1]+1, 0, im.nv[0])
+            iter[2] = i64clip(iter[2]-1, 0, im.nv[1])
+            iter[3] = i64clip(iter[3]+1, 0, im.nv[1])
             nx = (iter[1] - iter[0])
             ny = (iter[3] - iter[2])
             size = nx * ny



https://bitbucket.org/yt_analysis/yt/changeset/9ac0402f5338/
changeset:   9ac0402f5338
branch:      yt
user:        MatthewTurk
date:        2012-06-07 19:48:27
summary:     Moving yt/utilities/_amr_utils to yt/utilities/lib
affected #:  115 files
Diff too large to display.

https://bitbucket.org/yt_analysis/yt/changeset/0fd240083d6b/
changeset:   0fd240083d6b
branch:      yt
user:        MatthewTurk
date:        2012-06-07 20:33:31
summary:     Another amr_utils fix.
affected #:  1 file

diff -r 9ac0402f5338571518c009342c902cf890cf327b -r 0fd240083d6b077c9364f91db871e75e88f8e622 yt/utilities/linear_interpolators.py
--- a/yt/utilities/linear_interpolators.py
+++ b/yt/utilities/linear_interpolators.py
@@ -27,7 +27,7 @@
 import numpy as na
 
 from yt.funcs import *
-import amr_utils
+import yt.utilities.lib as lib
 
 class UnilinearFieldInterpolator:
     def __init__(self, table, boundaries, field_names, truncate=False):
@@ -52,7 +52,7 @@
                 x_i = na.minimum(na.maximum(x_i,0), len(self.x_bins)-2)
 
         my_vals = na.zeros(x_vals.shape, dtype='float64')
-        amr_utils.UnilinearlyInterpolate(self.table, x_vals, self.x_bins, x_i, my_vals)
+        lib.UnilinearlyInterpolate(self.table, x_vals, self.x_bins, x_i, my_vals)
         return my_vals.reshape(orig_shape)
 
 class BilinearFieldInterpolator:
@@ -83,7 +83,7 @@
                 y_i = na.minimum(na.maximum(y_i,0), len(self.y_bins)-2)
 
         my_vals = na.zeros(x_vals.shape, dtype='float64')
-        amr_utils.BilinearlyInterpolate(self.table,
+        lib.BilinearlyInterpolate(self.table,
                                  x_vals, y_vals, self.x_bins, self.y_bins,
                                  x_i, y_i, my_vals)
         return my_vals.reshape(orig_shape)
@@ -121,7 +121,7 @@
                 z_i = na.minimum(na.maximum(z_i,0), len(self.z_bins)-2)
 
         my_vals = na.zeros(x_vals.shape, dtype='float64')
-        amr_utils.TrilinearlyInterpolate(self.table,
+        lib.TrilinearlyInterpolate(self.table,
                                  x_vals, y_vals, z_vals,
                                  self.x_bins, self.y_bins, self.z_bins,
                                  x_i, y_i, z_i, my_vals)



https://bitbucket.org/yt_analysis/yt/changeset/38c76542e2a2/
changeset:   38c76542e2a2
branch:      yt
user:        MatthewTurk
date:        2012-06-07 20:34:17
summary:     One more fix.
affected #:  1 file

diff -r 0fd240083d6b077c9364f91db871e75e88f8e622 -r 38c76542e2a281ab9e280db2c8b860465dc13717 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -30,7 +30,7 @@
 from yt.visualization.volume_rendering.grid_partitioner import HomogenizedVolume
 from yt.visualization.image_writer import write_image, write_bitmap
 from yt.utilities.lib import kdtree_get_choices
-from yt.utilities._amr_utils.grid_traversal import PartitionedGrid
+from yt.utilities.lib.grid_traversal import PartitionedGrid
 from yt.utilities.performance_counters import yt_counters, time_function
 from yt.utilities.parallel_tools.parallel_analysis_interface \
     import ParallelAnalysisInterface 



https://bitbucket.org/yt_analysis/yt/changeset/42fe48b6f673/
changeset:   42fe48b6f673
branch:      yt
user:        MatthewTurk
date:        2012-06-07 20:58:12
summary:     Removing pylab import
affected #:  1 file

diff -r 38c76542e2a281ab9e280db2c8b860465dc13717 -r 42fe48b6f67311ff636ce06d7b5b8e5d21fd7010 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -34,7 +34,6 @@
 from yt.utilities.performance_counters import yt_counters, time_function
 from yt.utilities.parallel_tools.parallel_analysis_interface \
     import ParallelAnalysisInterface 
-import matplotlib.pylab as pl
 from copy import deepcopy
 from yt.config import ytcfg
 from time import time



https://bitbucket.org/yt_analysis/yt/changeset/e907fb850d84/
changeset:   e907fb850d84
branch:      yt
user:        samskillman
date:        2012-06-07 22:40:25
summary:     Adding a camera.roll(), fixing up the InteractiveCamera
affected #:  1 file

diff -r 38c76542e2a281ab9e280db2c8b860465dc13717 -r e907fb850d84d6c77a5369a7e91bc8458fd409d1 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -277,7 +277,7 @@
         if north_vector is None:
             north_vector = self.orienter.north_vector
         if normal_vector is None:
-            normal_vector = self.front_cemter - self.center
+            normal_vector = self.orienter.normal_vector
         self.orienter.switch_orientation(normal_vector = normal_vector,
                                          north_vector = north_vector)
         self._setup_box_properties(width, self.center, self.orienter.unit_vectors)
@@ -525,6 +525,25 @@
 
         self.switch_view(normal_vector=na.dot(R,normal_vector))
 
+    def roll(self, theta):
+        r"""Roll by a given angle
+
+        Roll the view.
+
+        Parameters
+        ----------
+        theta : float, in radians
+             Angle (in radians) by which to roll the view.
+
+        Examples
+        --------
+
+        >>> cam.roll(na.pi/4)
+        """
+        rot_vector = self.orienter.normal_vector
+        R = get_rotation_matrix(self, theta, rot_vector)
+        north_vector = self.orienter.north_vector
+        self.switch_view(north_vector=na.dot(R, north_vector))
 
     def rotation(self, theta, n_steps, rot_vector=None, clip_ratio = None):
         r"""Loop over rotate, creating a rotation
@@ -561,23 +580,7 @@
 data_object_registry["camera"] = Camera
 
 class InteractiveCamera(Camera):
-    def __init__(self, center, normal_vector, width,
-                 resolution, transfer_function,
-                 north_vector = None, steady_north=False,
-                 volume = None, fields = None,
-                 log_fields = None,
-                 sub_samples = 5, pf = None,
-                 use_kd=True, l_max=None, no_ghost=True,
-                 tree_type='domain',le=None, re=None):
-        self.frames = []
-        Camera.__init__(self, center, normal_vector, width,
-                 resolution, transfer_function,
-                 north_vector = north_vector, steady_north=steady_north,
-                 volume = volume, fields = fields,
-                 log_fields = log_fields,
-                 sub_samples = sub_samples, pf = pf,
-                 use_kd=use_kd, l_max=l_max, no_ghost=no_ghost,
-                 tree_type=tree_type,le=le, re=re)
+    frames = []
 
     def snapshot(self, fn = None, clip_ratio = None):
         import matplotlib



https://bitbucket.org/yt_analysis/yt/changeset/d934245c03ba/
changeset:   d934245c03ba
branch:      yt
user:        samskillman
date:        2012-06-07 22:45:25
summary:     Merging
affected #:  1 file

diff -r e907fb850d84d6c77a5369a7e91bc8458fd409d1 -r d934245c03baaaeadffc62ff4f5178dfa57cc4e9 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -34,7 +34,6 @@
 from yt.utilities.performance_counters import yt_counters, time_function
 from yt.utilities.parallel_tools.parallel_analysis_interface \
     import ParallelAnalysisInterface 
-import matplotlib.pylab as pl
 from copy import deepcopy
 from yt.config import ytcfg
 from time import time



https://bitbucket.org/yt_analysis/yt/changeset/27258ef17c49/
changeset:   27258ef17c49
branch:      yt
user:        samskillman
date:        2012-06-08 17:56:32
summary:     Bugfix for lighting; improved load balancing with (static, chunksize=1) scheduling for plane-parallel rays and (dynamic, chunksize=100) for arbitrary vectors.  This is a small improvement for plane parallel, and an improvement from 16 to 12 seconds for a perspective camera.  Finally, change the __init__ for PerspectiveCamera to improve portability.
affected #:  3 files

diff -r d934245c03baaaeadffc62ff4f5178dfa57cc4e9 -r 27258ef17c4936ad4522688a52012a10ac32102a yt/utilities/lib/field_interpolation_tables.pxd
--- a/yt/utilities/lib/field_interpolation_tables.pxd
+++ b/yt/utilities/lib/field_interpolation_tables.pxd
@@ -119,7 +119,7 @@
         if fid != -1: istorage[i] *= istorage[fid]
     for i in range(6):
         trgba[i] = istorage[field_table_ids[i]]
-    ta = expf(-fmax(dt*(trgba[0] + trgba[1] + trgba[2]), 0.0))
+    ta = fmax(1.0-dt*(trgba[0] + trgba[1] + trgba[2]), 0.0)
     for i in range(3):
         rgba[i] = (1.-ta)*trgba[i]*(1. + dot_prod*l_rgba[i]) + ta * rgba[i]
 


diff -r d934245c03baaaeadffc62ff4f5178dfa57cc4e9 -r 27258ef17c4936ad4522688a52012a10ac32102a yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -266,7 +266,7 @@
                 idata = <ImageAccumulator *> malloc(sizeof(ImageAccumulator))
                 idata.supp_data = self.supp_data
                 v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-                for j in prange(size, schedule="dynamic"):
+                for j in prange(size, schedule="static",chunksize=1):
                     vj = j % ny
                     vi = (j - vj) / ny + iter[0]
                     vj = vj + iter[2]
@@ -291,7 +291,7 @@
                 v_dir = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
                 # If we do not have a simple image plane, we have to cast all
                 # our rays 
-                for j in prange(size, schedule="guided"):
+                for j in prange(size, schedule="dynamic", chunksize=100):
                     offset = j * 3
                     for i in range(3): v_pos[i] = im.vp_pos[i + offset]
                     for i in range(3): v_dir[i] = im.vp_dir[i + offset]


diff -r d934245c03baaaeadffc62ff4f5178dfa57cc4e9 -r 27258ef17c4936ad4522688a52012a10ac32102a yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -618,24 +618,10 @@
 data_object_registry["interactive_camera"] = InteractiveCamera
 
 class PerspectiveCamera(Camera):
-    def __init__(self, center, normal_vector, width,
-                 resolution, transfer_function,
-                 north_vector = None, steady_north=False,
-                 volume = None, fields = None,
-                 log_fields = None,
-                 sub_samples = 5, pf = None,
-                 use_kd=True, l_max=None, no_ghost=True,
-                 tree_type='domain', expand_factor=1.0,
-                 le=None, re=None, use_light=False):
-        self.expand_factor = expand_factor
-        Camera.__init__(self, center, normal_vector, width,
-                 resolution, transfer_function,
-                 north_vector=north_vector, steady_north=steady_north,
-                 volume=volume, fields=fields,
-                 log_fields=log_fields,
-                 sub_samples=sub_samples, pf=pf,
-                 use_kd=use_kd, l_max=l_max, no_ghost=no_ghost,
-                 tree_type=tree_type, le=le, re=re, use_light=use_light)
+    expand_factor = 1.0
+    def __init__(self, *args, **kwargs):
+        expand_factor = kwargs.pop('expand_factor', 1.0)
+        Camera.__init__(self, *args, **kwargs)
 
     def get_sampler_args(self, image):
         # We should move away from pre-generation of vectors like this and into



https://bitbucket.org/yt_analysis/yt/changeset/ec374f56d5b6/
changeset:   ec374f56d5b6
branch:      yt
user:        jwise77
date:        2012-06-08 23:04:48
summary:     Adding include_dirs for files that need .h files.  This was needed on
the two machines on which I have installed yt-refactor.  I've also fixed
an oversight from the merge process in the fisheye camera.
affected #:  2 files

diff -r 27258ef17c4936ad4522688a52012a10ac32102a -r ec374f56d5b6065b0f56086197012f23261c2827 yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -112,16 +112,19 @@
     config.add_extension("ContourFinding", 
                 ["yt/utilities/lib/ContourFinding.pyx",
                  "yt/utilities/lib/union_find.c"],
+                include_dirs=["yt/utilities/lib/"],
                 libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
     config.add_extension("DepthFirstOctree", 
                 ["yt/utilities/lib/DepthFirstOctree.pyx"],
                 libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
     config.add_extension("fortran_reader", 
                 ["yt/utilities/lib/fortran_reader.pyx"],
+                include_dirs=["yt/utilities/lib/"],
                 libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
     config.add_extension("freetype_writer", 
                 ["yt/utilities/lib/freetype_writer.pyx"],
-                include_dirs = [freetype_inc,os.path.join(freetype_inc, "freetype2")],
+                include_dirs = [freetype_inc,os.path.join(freetype_inc, "freetype2"),
+                                "yt/utilities/lib"],
                 library_dirs = [freetype_lib], libraries=["freetype"],
                 depends=["yt/utilities/lib/freetype_includes.h"])
     config.add_extension("geometry_utils", 
@@ -135,6 +138,7 @@
     config.add_extension("marching_cubes", 
                 ["yt/utilities/lib/marching_cubes.pyx",
                  "yt/utilities/lib/FixedInterpolator.c"],
+                include_dirs=["yt/utilities/lib/"],
                 libraries=["m"],
                 depends=["yt/utilities/lib/fp_utils.pxd",
                          "yt/utilities/lib/fixed_interpolator.pxd",


diff -r 27258ef17c4936ad4522688a52012a10ac32102a -r ec374f56d5b6065b0f56086197012f23261c2827 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -1156,7 +1156,7 @@
             rot_vector = na.cross(rvec, self.normal_vector)
             rot_vector /= (rot_vector**2).sum()**0.5
             
-            self.rotation_matrix = get_rotation_matrix(angle,rot_vector)
+            self.rotation_matrix = get_rotation_matrix(self,angle,rot_vector)
             self.normal_vector = na.dot(self.rotation_matrix,self.normal_vector)
             self.north_vector = na.dot(self.rotation_matrix,self.north_vector)
             self.east_vector = na.dot(self.rotation_matrix,self.east_vector)



https://bitbucket.org/yt_analysis/yt/changeset/b911ed3218d9/
changeset:   b911ed3218d9
branch:      yt
user:        samskillman
date:        2012-06-14 04:28:38
summary:     Restoring an interpolated off-axis projection.  This is now controlled by the keyword interpolated=(True/False), with False as the default.  Additionally, off_axis_projection is now simply a wrapper around the much more capable ProjectionCamera class.
affected #:  2 files
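
A minimal usage sketch of the restored keyword, assuming `pf` is an already-loaded parameter file; the center, normal vector, width, resolution, and field below are illustrative:

    from yt.visualization.volume_rendering.camera import off_axis_projection
    image = off_axis_projection(pf, [0.5, 0.5, 0.5], [0.2, 0.3, 0.4],
                                0.2, 256, "Density", interpolated=True)
    # Roughly equivalent long form through the camera this call now wraps:
    # cam = ProjectionCamera([0.5, 0.5, 0.5], [0.2, 0.3, 0.4], 0.2, 256,
    #                        "Density", pf=pf, interpolated=True)
    # image = cam.snapshot()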

diff -r ec374f56d5b6065b0f56086197012f23261c2827 -r b911ed3218d9d1f42e9617bd3b764cb631dbde7d yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -322,10 +322,6 @@
     for i in range(imin(3, vc.n_fields)):
         im.rgba[i] += vc.data[i][di] * dl
 
-cdef class ProjectionSampler(ImageSampler):
-    cdef void setup(self, PartitionedGrid pg):
-        self.sampler = projection_sampler
-
 cdef struct VolumeRenderAccumulator:
     int n_fits
     int n_samples
@@ -338,6 +334,71 @@
     np.float64_t *light_dir
     np.float64_t *light_rgba
 
+
+cdef class ProjectionSampler(ImageSampler):
+    cdef void setup(self, PartitionedGrid pg):
+        self.sampler = projection_sampler
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef void interpolated_projection_sampler(
+                 VolumeContainer *vc, 
+                 np.float64_t v_pos[3],
+                 np.float64_t v_dir[3],
+                 np.float64_t enter_t,
+                 np.float64_t exit_t,
+                 int index[3],
+                 void *data) nogil:
+    cdef ImageAccumulator *im = <ImageAccumulator *> data
+    cdef VolumeRenderAccumulator *vri = <VolumeRenderAccumulator *> \
+            im.supp_data
+    # we assume this has vertex-centered data.
+    cdef int offset = index[0] * (vc.dims[1] + 1) * (vc.dims[2] + 1) \
+                    + index[1] * (vc.dims[2] + 1) + index[2]
+    cdef np.float64_t slopes[6], dp[3], ds[3]
+    cdef np.float64_t dt = (exit_t - enter_t) / vri.n_samples
+    cdef np.float64_t dvs[6]
+    for i in range(3):
+        dp[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
+        dp[i] -= index[i] * vc.dds[i] + vc.left_edge[i]
+        dp[i] *= vc.idds[i]
+        ds[i] = v_dir[i] * vc.idds[i] * dt
+    for i in range(vri.n_samples):
+        for j in range(vc.n_fields):
+            dvs[j] = offset_interpolate(vc.dims, dp,
+                    vc.data[j] + offset)
+        for j in range(imin(3, vc.n_fields)):
+            im.rgba[j] += dvs[j] * dt
+        for j in range(3):
+            dp[j] += ds[j]
+
+cdef class InterpolatedProjectionSampler(ImageSampler):
+    cdef VolumeRenderAccumulator *vra
+    cdef public object tf_obj
+    cdef public object my_field_tables
+    def __cinit__(self, 
+                  np.ndarray vp_pos,
+                  np.ndarray vp_dir,
+                  np.ndarray[np.float64_t, ndim=1] center,
+                  bounds,
+                  np.ndarray[np.float64_t, ndim=3] image,
+                  np.ndarray[np.float64_t, ndim=1] x_vec,
+                  np.ndarray[np.float64_t, ndim=1] y_vec,
+                  np.ndarray[np.float64_t, ndim=1] width,
+                  n_samples = 10):
+        ImageSampler.__init__(self, vp_pos, vp_dir, center, bounds, image,
+                               x_vec, y_vec, width)
+        cdef int i
+        # Now we handle tf_obj
+        self.vra = <VolumeRenderAccumulator *> \
+            malloc(sizeof(VolumeRenderAccumulator))
+        self.vra.n_samples = n_samples
+        self.supp_data = <void *> self.vra
+
+    cdef void setup(self, PartitionedGrid pg):
+        self.sampler = interpolated_projection_sampler
+
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)


diff -r ec374f56d5b6065b0f56086197012f23261c2827 -r b911ed3218d9d1f42e9617bd3b764cb631dbde7d yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -44,7 +44,7 @@
 
 from yt.utilities.lib import \
     PartitionedGrid, ProjectionSampler, VolumeRenderSampler, \
-    LightSourceRenderSampler, \
+    LightSourceRenderSampler, InterpolatedProjectionSampler, \
     arr_vec2pix_nest, arr_pix2vec_nest, arr_ang2pix_nest, \
     pixelize_healpix, arr_fisheye_vectors
 
@@ -314,7 +314,7 @@
     def finalize_image(self, image):
         pass
 
-    def _render(self, double_check, num_threads, image, na, sampler):
+    def _render(self, double_check, num_threads, image, sampler):
         pbar = get_pbar("Ray casting", (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
         total_cells = 0
         if double_check:
@@ -374,7 +374,7 @@
 
         self.initialize_source()
 
-        image = self._render(double_check, num_threads, image, na, sampler)
+        image = self._render(double_check, num_threads, image, sampler)
 
         self.save_image(fn, clip_ratio, image)
 
@@ -720,7 +720,7 @@
         return args
  
 
-    def _render(self, double_check, num_threads, image, na, sampler):
+    def _render(self, double_check, num_threads, image, sampler):
         pbar = get_pbar("Ray casting", (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
         total_cells = 0
         if double_check:
@@ -771,7 +771,7 @@
 
         self.volume.initialize_source()
 
-        image = self._render(double_check, num_threads, image, na, sampler)
+        image = self._render(double_check, num_threads, image, sampler)
 
         self.save_image(fn, clim, image)
 
@@ -936,7 +936,7 @@
     def finalize_image(self, image):
         image.shape = self.resolution, self.resolution, 3
 
-    def _render(self, double_check, num_threads, image, na, sampler):
+    def _render(self, double_check, num_threads, image, sampler):
         pbar = get_pbar("Ray casting", (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
         total_cells = 0
         if double_check:
@@ -1346,110 +1346,6 @@
                 self.center += dx
             yield self.snapshot()
 
-def off_axis_projection(pf, center, normal_vector, width, resolution,
-                        field, weight = None, num_threads = 0):
-    r"""Project through a parameter file, off-axis, and return the image plane.
-
-    This function will accept the necessary items to integrate through a volume
-    at an arbitrary angle and return the integrated field of view to the user.
-    Note that if a weight is supplied, it will multiply the pre-interpolated
-    values together, then create cell-centered values, then interpolate within
-    the cell to conduct the integration.
-
-    Parameters
-    ----------
-    pf : `~yt.data_objects.api.StaticOutput`
-        This is the parameter file to volume render.
-    center : array_like
-        The current 'center' of the view port -- the focal point for the
-        camera.
-    normal_vector : array_like
-        The vector between the camera position and the center.
-    width : float or list of floats
-        The current width of the image.  If a single float, the volume is
-        cubical, but if not, it is left/right, top/bottom, front/back
-    resolution : int or list of ints
-        The number of pixels in each direction.
-    field : string
-        The field to project through the volume
-    weight : optional, default None
-        If supplied, the field will be pre-multiplied by this, then divided by
-        the integrated value of this field.  This returns an average rather
-        than a sum.
-
-    Returns
-    -------
-    image : array
-        An (N,N) array of the final integrated values, in float64 form.
-
-    Examples
-    --------
-
-    >>> image = off_axis_projection(pf, [0.5, 0.5, 0.5], [0.2,0.3,0.4],
-                      0.2, N, "Temperature", "Density")
-    >>> write_image(na.log10(image), "offaxis.png")
-
-    """
-    # We manually modify the ProjectionTransferFunction to get it to work the
-    # way we want, with a second field that's also passed through.
-    fields = [field]
-
-    if weight is not None:
-        # This is a temporary field, which we will remove at the end.
-        def _wf(f1, w1):
-            def WeightField(field, data):
-                return data[f1].astype("float64") * \
-                       data[w1].astype("float64")
-            return WeightField
-        pf.field_info.add_field("temp_weightfield",
-                    function=_wf(field, weight))
-        fields = ["temp_weightfield", weight]
-    image = na.zeros((resolution, resolution, 3), dtype='float64',
-                      order='C')
-    normal_vector, north_vector, east_vector = ortho_find(normal_vector)
-    unit_vectors = [north_vector, east_vector, normal_vector]
-    back_center= center - 0.5*width * normal_vector
-    rotp = na.concatenate([na.linalg.pinv(unit_vectors).ravel('F'),
-                           back_center])
-    sampler = ProjectionSampler(
-        rotp, normal_vector * width, back_center,
-        (-width/2, width/2, -width/2, width/2),
-        image, north_vector, east_vector,
-        na.array([width, width, width], dtype='float64'))
-    # Calculate the eight corners of the box
-    # Back corners ...
-    mi = pf.domain_right_edge.copy()
-    ma = pf.domain_left_edge.copy()
-    for off1 in [-1, 1]:
-        for off2 in [-1, 1]:
-            for off3 in [-1, 1]:
-                this_point = (center + width/2.0 * off1 * north_vector
-                                     + width/2.0 * off2 * east_vector
-                                     + width/2.0 * off3 * normal_vector)
-                na.minimum(mi, this_point, mi)
-                na.maximum(ma, this_point, ma)
-    # Now we have a bounding box.
-    grids = pf.h.region(center, mi, ma)._grids
-    pb = get_pbar("Sampling ", len(grids))
-    for i,grid in enumerate(grids):
-        data = [(grid[field] * grid.child_mask).astype("float64")
-                for field in fields]
-        pg = PartitionedGrid(
-            grid.id, data,
-            grid.LeftEdge, grid.RightEdge, grid.ActiveDimensions.astype("int64"))
-        grid.clear_data()
-        sampler(pg)
-        pb.update(i)
-    pb.finish()
-    image = sampler.aimage
-    if weight is None:
-        dl = width * pf.units[pf.field_info[field].projection_conversion]
-        image *= dl
-    else:
-        image[:,:,0] /= image[:,:,1]
-        pf.field_info.pop("temp_weightfield")
-    return image[:,:,0]
-
 def allsky_projection(pf, center, radius, nside, field, weight = None,
                       inner_radius = 10, rotation = None):
     r"""Project through a parameter file, through an allsky-method
@@ -1577,22 +1473,50 @@
 
 class ProjectionCamera(Camera):
     def __init__(self, center, normal_vector, width, resolution,
-            field, weight=None, volume=None, le=None, re=None,
-            north_vector=None, pf=None):
-        Camera.__init__(self, center, normal_vector, width, resolution, None,
-                fields = field, pf=pf, volume=1,
-                le=le, re=re, north_vector=north_vector)
+            field, weight=None, volume=None, no_ghost = False, 
+            le=None, re=None,
+            north_vector=None, pf=None, interpolated=False):
+
+        if not interpolated:
+            volume = 1
+
+        self.interpolated = interpolated
         self.field = field
         self.weight = weight
         self.resolution = resolution
 
+        fields = [field]
+        if self.weight is not None:
+            # This is a temporary field, which we will remove at the end.
+            def _make_wf(f, w):
+                def temp_weightfield(a, b):
+                    tr = b[f].astype("float64") * b[w]
+                    return tr
+                return temp_weightfield
+            pf.field_info.add_field("temp_weightfield",
+                function=_make_wf(self.field, self.weight))
+            fields = ["temp_weightfield", self.weight]
+        
+        self.fields = fields
+        self.log_fields = [False]*len(self.fields)
+        Camera.__init__(self, center, normal_vector, width, resolution, None,
+                fields = fields, pf=pf, volume=volume,
+                log_fields=self.log_fields, 
+                le=le, re=re, north_vector=north_vector,
+                no_ghost=no_ghost)
+
     def get_sampler(self, args):
-        sampler = ProjectionSampler(*args)
+        if self.interpolated:
+            sampler = InterpolatedProjectionSampler(*args)
+        else:
+            sampler = ProjectionSampler(*args)
         return sampler
 
     def initialize_source(self):
-        pass
-
+        if self.interpolated:
+            Camera.initialize_source(self)
+        else:
+            pass
 
     def get_sampler_args(self, image):
         width = self.width[2]
@@ -1607,7 +1531,8 @@
         args = (rotp, normal_vector * width, back_center,
             (-width/2, width/2, -width/2, width/2),
             image, north_vector, east_vector,
-            na.array([width, width, width], dtype='float64'))
+            na.array([width, width, width], dtype='float64'),
+            self.sub_samples)
         return args
 
     def finalize_image(self,image):
@@ -1617,13 +1542,15 @@
             image *= dl
         else:
             image[:,:,0] /= image[:,:,1]
-            pf.field_info.pop("temp_weightfield")
         return image[:,:,0]
 
 
-    def _render(self, double_check, num_threads, image, na, sampler):
+    def _render(self, double_check, num_threads, image, sampler):
         # Calculate the eight corners of the box
         # Back corners ...
+        if self.interpolated:
+            return Camera._render(self, double_check, num_threads, image,
+                    sampler)
         pf = self.pf
         width = self.width[2]
         north_vector = self.orienter.unit_vectors[0]
@@ -1655,7 +1582,7 @@
             sampler(pg, num_threads = num_threads)
             pb.update(i)
         pb.finish()
-        
+
         image = sampler.aimage
         self.finalize_image(image)
         return image
@@ -1678,21 +1605,89 @@
 
         fields = [self.field]
         resolution = self.resolution
-        pf = self.pf
-        if self.weight is not None:
-            # This is a temporary field, which we will remove at the end.
-            def _make_wf(f, w):
-                def temp_weightfield(a, b):
-                    tr = b[f].astype("float64") * b[w]
-                    return tr
-                return temp_weightfield
-            pf.field_info.add_field("temp_weightfield",
-                function=_make_wf(self.field, self.weight))
-            fields = ["temp_weightfield", self.weight]
-        self.fields = fields
-        return Camera.snapshot(self, fn = fn, clip_ratio = clip_ratio, double_check = double_check,
-                 num_threads = num_threads)
 
+        image = self.new_image()
+
+        args = self.get_sampler_args(image)
+
+        sampler = self.get_sampler(args)
+
+        self.initialize_source()
+        
+        image = self._render(double_check, num_threads, image, sampler)
+
+        self.save_image(fn, clip_ratio, image)
+
+        return image
 
 data_object_registry["projection_camera"] = ProjectionCamera
 
+def off_axis_projection(pf, center, normal_vector, width, resolution,
+                        field, weight = None, num_threads = 0, 
+                        volume = None, no_ghost = False, interpolated = False):
+    r"""Project through a parameter file, off-axis, and return the image plane.
+
+    This function will accept the necessary items to integrate through a volume
+    at an arbitrary angle and return the integrated field of view to the user.
+    Note that if a weight is supplied, it will multiply the pre-interpolated
+    values together, then create cell-centered values, then interpolate within
+    the cell to conduct the integration.
+
+    Parameters
+    ----------
+    pf : `~yt.data_objects.api.StaticOutput`
+        This is the parameter file to volume render.
+    center : array_like
+        The current 'center' of the view port -- the focal point for the
+        camera.
+    normal_vector : array_like
+        The vector between the camera position and the center.
+    width : float or list of floats
+        The current width of the image.  If a single float, the volume is
+        cubical, but if not, it is left/right, top/bottom, front/back
+    resolution : int or list of ints
+        The number of pixels in each direction.
+    field : string
+        The field to project through the volume
+    weight : optional, default None
+        If supplied, the field will be pre-multiplied by this, then divided by
+        the integrated value of this field.  This returns an average rather
+        than a sum.
+    volume : `yt.extensions.volume_rendering.HomogenizedVolume`, optional
+        The volume to ray cast through.  Can be specified for finer-grained
+        control, but otherwise will be automatically generated.
+    no_ghost: bool, optional
+        Optimization option.  If True, homogenized bricks will
+        extrapolate out from grid instead of interpolating from
+        ghost zones that have to first be calculated.  This can
+        lead to large speed improvements, but at a loss of
+        accuracy/smoothness in resulting image.  The effects are
+        less notable when the transfer function is smooth and
+        broad. Default: True
+    interpolated : optional, default False
+        If True, the data is first interpolated to vertex-centered data, 
+        then tri-linearly interpolated along the ray. Not suggested for 
+        quantitative studies.
+
+    Returns
+    -------
+    image : array
+        An (N,N) array of the final integrated values, in float64 form.
+
+    Examples
+    --------
+
+    >>> image = off_axis_projection(pf, [0.5, 0.5, 0.5], [0.2,0.3,0.4],
+                      0.2, N, "Temperature", "Density")
+    >>> write_image(na.log10(image), "offaxis.png")
+
+    """
+    projcam = ProjectionCamera(center, normal_vector, width, resolution,
+            field, weight=weight, pf=pf, volume=volume,
+            no_ghost=no_ghost, interpolated=interpolated)
+    image = projcam.snapshot(num_threads=num_threads)
+    if weight is not None:
+        pf.field_info.pop("temp_weightfield")
+    del projcam
+    return image
+



https://bitbucket.org/yt_analysis/yt/changeset/de09272069b4/
changeset:   de09272069b4
branch:      yt
user:        samskillman
date:        2012-06-14 04:32:24
summary:     Comment out debugging print statements.
affected #:  2 files

diff -r b911ed3218d9d1f42e9617bd3b764cb631dbde7d -r de09272069b4a0b546c9d4fded62acf2960547fb yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -1156,12 +1156,12 @@
 
         """
         if self.tree is None: 
-            print 'No KD Tree Exists'
+            mylog.error('No KD Tree Exists')
             return
         self.image = image
 
         viewpoint = back_center 
-        print 'Moving from front_center to back_center:',front_center, back_center
+        # print 'Moving from front_center to back_center:',front_center, back_center
 
         for node in self.viewpoint_traverse(viewpoint):
             if node.grid is not None:
@@ -1204,9 +1204,9 @@
             else:
                 front = parent.left_child
                 back = parent.right_child
-            print 'Combining', viewpoint, parent.split_ax, parent.split_pos
-            print front.l_corner, front.r_corner
-            print back.l_corner, back.r_corner
+            # print 'Combining', viewpoint, parent.split_ax, parent.split_pos
+            # print front.l_corner, front.r_corner
+            # print back.l_corner, back.r_corner
 
             # mylog.debug('front owner %i back owner %i parent owner %i'%( front.owner, back.owner, parent.owner))
             # Send the images around


diff -r b911ed3218d9d1f42e9617bd3b764cb631dbde7d -r de09272069b4a0b546c9d4fded62acf2960547fb yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -1108,7 +1108,7 @@
             self.wg = my_wg
             self.imi = int(self.wg.name[0:4])
             self.imj = int(self.wg.name[5:9])
-            print 'My new communicator has the name %s' % self.wg.name
+            mylog.info('My new communicator has the name %s' % self.wg.name)
             self.nimx = nimx
             self.nimy = nimy
         else:
@@ -1588,8 +1588,6 @@
         return image
 
     def save_image(self, fn, clip_ratio, image):
-        print 'I am here!'
-        print fn 
         if self.pf.field_info[self.field].take_log:
             im = na.log10(image)
         else:



https://bitbucket.org/yt_analysis/yt/changeset/0a1f0275ec84/
changeset:   0a1f0275ec84
branch:      yt
user:        samskillman
date:        2012-06-14 04:55:11
summary:     Adding some docs
affected #:  1 file

diff -r de09272069b4a0b546c9d4fded62acf2960547fb -r 0a1f0275ec841c0cec79e6609f39e1cc76e68cb9 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -360,6 +360,13 @@
         clip_ratio : float, optional
             If supplied, the 'max_val' argument to write_bitmap will be handed
             clip_ratio * image.std()
+        double_check : bool, optional
+            Optionally makes sure that the data contains only valid entries.
+            Used for debugging.
+        num_threads : int, optional
+            If supplied, will use 'num_threads' number of OpenMP threads during
+            the rendering.  Defaults to 0, which uses the environment variable
+            OMP_NUM_THREADS.
 
         Returns
         -------
@@ -776,6 +783,7 @@
         self.save_image(fn, clim, image)
 
         return image
+
     def save_image(self, fn, clim, image):
         if self.comm.rank is 0 and fn is not None:
             # This assumes Density; this is a relatively safe assumption.
@@ -1611,12 +1619,13 @@
         sampler = self.get_sampler(args)
 
         self.initialize_source()
-        
+
         image = self._render(double_check, num_threads, image, sampler)
 
         self.save_image(fn, clip_ratio, image)
 
         return image
+    snapshot.__doc__ = Camera.snapshot.__doc__
 
 data_object_registry["projection_camera"] = ProjectionCamera
 



https://bitbucket.org/yt_analysis/yt/changeset/3e6147609e6d/
changeset:   3e6147609e6d
branch:      yt
user:        samskillman
date:        2012-06-14 05:14:40
summary:     Only return the [:,:,0] part of the image from a projection camera.
affected #:  1 file

diff -r 0a1f0275ec841c0cec79e6609f39e1cc76e68cb9 -r 3e6147609e6d4c108d1d5f83362b7bc28c36658c yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -1696,5 +1696,5 @@
     if weight is not None:
         pf.field_info.pop("temp_weightfield")
     del projcam
-    return image
+    return image[:,:,0]
 



https://bitbucket.org/yt_analysis/yt/changeset/81c5f09b62f6/
changeset:   81c5f09b62f6
branch:      yt
user:        MatthewTurk
date:        2012-06-14 14:27:02
summary:     Updating install script to include hg 2.2.2 and fixing a bool/uint8 bug.
affected #:  3 files
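
For the bool/uint8 part, a small illustration (arrays made up) of why indexing with a uint8 mask needs the .view("bool") calls added in this diff:

    import numpy as na
    grids = na.arange(5)
    gi = na.array([1, 0, 1, 0, 1], dtype="uint8")   # what the selection helpers return
    print grids[gi]               # uint8 acts as fancy *indices*: [1 0 1 0 1]
    print grids[gi.view("bool")]  # boolean mask selects grids: [0 2 4]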

diff -r 3e6147609e6d4c108d1d5f83362b7bc28c36658c -r 81c5f09b62f66fc3b2a6bc805753e1baf35fc9ca doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -366,7 +366,7 @@
 echo 'ffc5c9e0c8c8ea66479abd467e442419bd1c867e6dbd180be6a032869467955dc570cfdf1388452871303a440738f302d3227ab7728878c4a114cfc45d29d23c  ipython-0.12.tar.gz' > ipython-0.12.tar.gz.sha512
 echo 'e748b66a379ee1e7963b045c3737670acf6aeeff1ebed679f427e74b642faa77404c2d5bbddb922339f009c229d0af1ae77cc43eab290e50af6157a6406d833f  libpng-1.2.43.tar.gz' > libpng-1.2.43.tar.gz.sha512
 echo 'f5ab95c29ef6958096970265a6079f0eb8c43a500924346c4a6c6eb89d9110eeeb6c34a53715e71240e82ded2b76a7b8d5a9b05a07baa000b2926718264ad8ff  matplotlib-1.1.0.tar.gz' > matplotlib-1.1.0.tar.gz.sha512
-echo '702f67c48e4dbe191dbe5ca0df6b5a84fa4f5c424cf1fae60b5053dfe6532531330738c7aa3012d900d49efdd743cd1ebc238bb15f354f67228e2a2c95b98a89  mercurial-2.2.tar.gz' > mercurial-2.2.tar.gz.sha512
+echo 'ec7416729f99f5eef6700507e740552e771d6dd8863f757311538d7d67a0eecd3426381bd42a7ddbf0771bdde8bba5cb943f60031ae3567d6a3dcac738facda8  mercurial-2.2.2.tar.gz' > mercurial-2.2.2.tar.gz.sha512
 echo 'de3dd37f753614055dcfed910e9886e03688b8078492df3da94b1ec37be796030be93291cba09e8212fffd3e0a63b086902c3c25a996cf1439e15c5b16e014d9  numpy-1.6.1.tar.gz' > numpy-1.6.1.tar.gz.sha512
 echo '5ad681f99e75849a5ca6f439c7a19bb51abc73d121b50f4f8e4c0da42891950f30407f761a53f0fe51b370b1dbd4c4f5a480557cb2444c8c7c7d5412b328a474  sqlite-autoconf-3070500.tar.gz' > sqlite-autoconf-3070500.tar.gz.sha512
 echo 'edae735960279d92acf58e1f4095c6392a7c2059b8f1d2c46648fc608a0fb06b392db2d073f4973f5762c034ea66596e769b95b3d26ad963a086b9b2d09825f2  zlib-1.2.3.tar.bz2' > zlib-1.2.3.tar.bz2.sha512
@@ -388,7 +388,7 @@
 get_enzotools Python-2.7.2.tgz
 get_enzotools numpy-1.6.1.tar.gz
 get_enzotools matplotlib-1.1.0.tar.gz
-get_enzotools mercurial-2.2.tar.gz
+get_enzotools mercurial-2.2.2.tar.gz
 get_enzotools ipython-0.12.tar.gz
 get_enzotools h5py-2.0.1.tar.gz
 get_enzotools Cython-0.16.tar.gz
@@ -531,7 +531,7 @@
 if [ $INST_HG -eq 1 ]
 then
     echo "Installing Mercurial."
-    do_setup_py mercurial-2.2
+    do_setup_py mercurial-2.2.2
     export HG_EXEC=${DEST_DIR}/bin/hg
 else
     # We assume that hg can be found in the path.
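
The `.sha512` sidecar files echoed above exist so the install script can verify each tarball before building it; the script performs that check in shell, but a rough Python equivalent (illustrative only, not part of install_script.sh) looks like:

    import hashlib

    def matches_sha512(tarball, sidecar):
        # Sidecar format is "<hexdigest>  <filename>", as echoed above.
        with open(sidecar) as f:
            recorded = f.read().split()[0]
        with open(tarball, "rb") as f:
            return hashlib.sha512(f.read()).hexdigest() == recorded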


diff -r 3e6147609e6d4c108d1d5f83362b7bc28c36658c -r 81c5f09b62f66fc3b2a6bc805753e1baf35fc9ca yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -580,7 +580,7 @@
     def _get_list_of_grids(self):
         gi = ortho_ray_grids(self, 
                 self.hierarchy.grid_left_edge,
-                self.hierarchy.grid_right_edge)
+                self.hierarchy.grid_right_edge).view("bool")
         self._grids = self.hierarchy.grids[gi]
 
     @restore_grid_state
@@ -654,7 +654,7 @@
     def _get_list_of_grids(self):
         gi = ray_grids(self,
                 self.hierarchy.grid_left_edge,
-                self.hierarchy.grid_right_edge)
+                self.hierarchy.grid_right_edge).view("bool")
         self._grids = self.hierarchy.grids[gi]
 
     @restore_grid_state
@@ -1057,7 +1057,7 @@
     def _get_list_of_grids(self):
         gi = slice_grids(self, 
                 self.hierarchy.grid_left_edge,
-                self.hierarchy.grid_right_edge)
+                self.hierarchy.grid_right_edge).view("bool")
         self._grids = self.hierarchy.grids[gi]
 
     def __cut_mask_child_mask(self, grid):
@@ -1215,7 +1215,7 @@
     def _get_list_of_grids(self):
         gridi = cutting_plane_grids(self, self.pf.h.grid_left_edge,
                                           self.pf.h.grid_right_edge)
-        self._grids = self.hierarchy.grids[gridi.astype("bool")]
+        self._grids = self.hierarchy.grids[gridi.view("bool")]
 
     @cache_mask
     def _get_cut_mask(self, grid):


diff -r 3e6147609e6d4c108d1d5f83362b7bc28c36658c -r 81c5f09b62f66fc3b2a6bc805753e1baf35fc9ca yt/utilities/lib/geometry_utils.pyx
--- a/yt/utilities/lib/geometry_utils.pyx
+++ b/yt/utilities/lib/geometry_utils.pyx
@@ -58,7 +58,7 @@
     cdef int py_ax = dobj.py_ax
     cdef np.float64_t px = dobj.px
     cdef np.float64_t py = dobj.py
-    cdef np.ndarray[np.int32_t, ndim=1] gridi = np.zeros(ng, dtype='int32_t')
+    cdef np.ndarray[np.uint8_t, ndim=1] gridi = np.zeros(ng, dtype='uint8')
     for i in range(ng):
         if (    (px >= left_edges[i, px])
             and (px < right_edges[i, px])
@@ -72,7 +72,7 @@
     cdef int i, ax
     cdef int i1, i2
     cdef int ng = left_edges.shape[0]
-    cdef np.ndarray[np.int32_t, ndim=1] gridi = np.zeros(ng, dtype='int32')
+    cdef np.ndarray[np.uint8_t, ndim=1] gridi = np.zeros(ng, dtype='uint8')
     cdef np.float64_t vs[3], t, p0[3], p1[3], v[3]
     for i in range(3):
         p0[i] = dobj.start_point[i]
@@ -125,7 +125,7 @@
                       np.ndarray[np.float64_t, ndim=2] right_edges):
     cdef int i, ax
     cdef int ng = left_edges.shape[0]
-    cdef np.ndarray[np.int32_t, ndim=1] gridi = np.zeros(ng, dtype='int32')
+    cdef np.ndarray[np.uint8_t, ndim=1] gridi = np.zeros(ng, dtype='uint8')
     ax = dobj.axis
     cdef np.float64_t coord = dobj.coord
     for i in range(ng):
@@ -138,7 +138,7 @@
                         np.ndarray[np.float64_t, ndim=2] right_edges):
     cdef int i
     cdef int ng = left_edges.shape[0]
-    cdef np.ndarray[np.int32_t, ndim=1] gridi = np.zeros(ng, dtype='int32')
+    cdef np.ndarray[np.uint8_t, ndim=1] gridi = np.zeros(ng, dtype='uint8')
     cdef np.float64_t *arr[2]
     arr[0] = <np.float64_t *> left_edges.data
     arr[1] = <np.float64_t *> right_edges.data
@@ -183,7 +183,7 @@
 @cython.wraparound(False)
 @cython.cdivision(True)
 def cutting_plane_cells(dobj, gobj):
-    cdef np.ndarray[np.int32_t, ndim=3] mask 
+    cdef np.ndarray[np.uint8_t, ndim=3] mask 
     cdef np.ndarray[np.float64_t, ndim=1] left_edge = gobj.LeftEdge
     cdef np.ndarray[np.float64_t, ndim=1] dds = gobj.dds
     cdef int i, j, k
@@ -191,7 +191,7 @@
     cdef np.float64_t norm_vec[3]
     cdef np.float64_t d = dobj._d
 
-    mask = np.zeros(gobj.ActiveDimensions, dtype='int32')
+    mask = np.zeros(gobj.ActiveDimensions, dtype='uint8')
     for i in range(3): norm_vec[i] = dobj._norm_vec[i]
     dist = 0.5*(dds[0]*dds[0] + dds[1]*dds[1] + dds[2]*dds[2])**0.5
     x = left_edge[0] + dds[0] * 0.5
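
The dtype changes above work around the lack of a boolean buffer type in the Cython syntax used here: the selectors fill uint8 arrays, and since a NumPy boolean array is also one byte per element, `.view("bool")` reinterprets the buffer in place (no copy) for use as a boolean index. The removed `dtype='int32_t'` literal was also plainly invalid, since 'int32_t' is not a NumPy dtype name. A small standalone sketch of the uint8/bool pattern:

    import numpy as np

    gridi = np.zeros(5, dtype="uint8")   # what the Cython selectors fill in
    gridi[1] = gridi[3] = 1
    mask = gridi.view("bool")            # zero-copy reinterpretation, not a cast
    grids = np.arange(5)[mask]           # boolean fancy indexing -> array([1, 3])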



https://bitbucket.org/yt_analysis/yt/changeset/e92274196c97/
changeset:   e92274196c97
branch:      yt
user:        MatthewTurk
date:        2012-06-14 14:48:56
summary:     Reverting to the previous version of data_containers.  This removes the use
of the new Cython object finders, but I would prefer those be held off until
3.0.
affected #:  1 file

diff -r 81c5f09b62f66fc3b2a6bc805753e1baf35fc9ca -r e92274196c970ec344181f349803320c869ca57d yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -41,8 +41,7 @@
 from yt.utilities.lib import find_grids_in_inclined_box, \
     grid_points_in_volume, planar_points_in_volume, VoxelTraversal, \
     QuadTree, get_box_grids_below_level, ghost_zone_interpolate, \
-    march_cubes_grid, march_cubes_grid_flux, ortho_ray_grids, ray_grids, \
-    slice_grids, cutting_plane_grids, cutting_plane_cells
+    march_cubes_grid, march_cubes_grid_flux
 from yt.utilities.data_point_utilities import CombineGrids, \
     DataCubeRefine, DataCubeReplace, FillRegion, FillBuffer
 from yt.utilities.definitions import axis_names, x_dict, y_dict
@@ -578,10 +577,12 @@
         return (self.px, self.py)
 
     def _get_list_of_grids(self):
-        gi = ortho_ray_grids(self, 
-                self.hierarchy.grid_left_edge,
-                self.hierarchy.grid_right_edge).view("bool")
-        self._grids = self.hierarchy.grids[gi]
+        # This bugs me, but we will give the tie to the LeftEdge
+        y = na.where( (self.px >=  self.pf.hierarchy.grid_left_edge[:,self.px_ax])
+                    & (self.px < self.pf.hierarchy.grid_right_edge[:,self.px_ax])
+                    & (self.py >=  self.pf.hierarchy.grid_left_edge[:,self.py_ax])
+                    & (self.py < self.pf.hierarchy.grid_right_edge[:,self.py_ax]))
+        self._grids = self.hierarchy.grids[y]
 
     @restore_grid_state
     def _get_data_from_grid(self, grid, field):
@@ -652,10 +653,31 @@
         #self._refresh_data()
 
     def _get_list_of_grids(self):
-        gi = ray_grids(self,
-                self.hierarchy.grid_left_edge,
-                self.hierarchy.grid_right_edge).view("bool")
-        self._grids = self.hierarchy.grids[gi]
+        # Get the value of the line at each LeftEdge and RightEdge
+        LE = self.pf.h.grid_left_edge
+        RE = self.pf.h.grid_right_edge
+        p = na.zeros(self.pf.h.num_grids, dtype='bool')
+        # Check left faces first
+        for i in range(3):
+            i1 = (i+1) % 3
+            i2 = (i+2) % 3
+            vs = self._get_line_at_coord(LE[:,i], i)
+            p = p | ( ( (LE[:,i1] <= vs[:,i1]) & (RE[:,i1] >= vs[:,i1]) ) \
+                    & ( (LE[:,i2] <= vs[:,i2]) & (RE[:,i2] >= vs[:,i2]) ) )
+            vs = self._get_line_at_coord(RE[:,i], i)
+            p = p | ( ( (LE[:,i1] <= vs[:,i1]) & (RE[:,i1] >= vs[:,i1]) ) \
+                    & ( (LE[:,i2] <= vs[:,i2]) & (RE[:,i2] >= vs[:,i2]) ) )
+        p = p | ( na.all( LE <= self.start_point, axis=1 ) 
+                & na.all( RE >= self.start_point, axis=1 ) )
+        p = p | ( na.all( LE <= self.end_point,   axis=1 ) 
+                & na.all( RE >= self.end_point,   axis=1 ) )
+        self._grids = self.hierarchy.grids[p]
+
+    def _get_line_at_coord(self, v, index):
+        # t*self.vec + self.start_point = self.end_point
+        t = (v - self.start_point[index])/self.vec[index]
+        t = t.reshape((t.shape[0],1))
+        return self.start_point + t*self.vec
 
     @restore_grid_state
     def _get_data_from_grid(self, grid, field):
@@ -1055,10 +1077,9 @@
         self.ActiveDimensions = (t.shape[1], 1, 1)
 
     def _get_list_of_grids(self):
-        gi = slice_grids(self, 
-                self.hierarchy.grid_left_edge,
-                self.hierarchy.grid_right_edge).view("bool")
-        self._grids = self.hierarchy.grids[gi]
+        goodI = ((self.pf.h.grid_right_edge[:,self.axis] > self.coord)
+              &  (self.pf.h.grid_left_edge[:,self.axis] <= self.coord ))
+        self._grids = self.pf.h.grids[goodI] # Using sources not hierarchy
 
     def __cut_mask_child_mask(self, grid):
         mask = grid.child_mask.copy()
@@ -1213,9 +1234,25 @@
         return self._norm_vec
 
     def _get_list_of_grids(self):
-        gridi = cutting_plane_grids(self, self.pf.h.grid_left_edge,
-                                          self.pf.h.grid_right_edge)
-        self._grids = self.hierarchy.grids[gridi.view("bool")]
+        # Recall that the projection of the distance vector from a point
+        # onto the normal vector of a plane is:
+        # D = (a x_0 + b y_0 + c z_0 + d)/sqrt(a^2+b^2+c^2)
+        # @todo: Convert to using corners
+        LE = self.pf.h.grid_left_edge
+        RE = self.pf.h.grid_right_edge
+        vertices = na.array([[LE[:,0],LE[:,1],LE[:,2]],
+                             [RE[:,0],RE[:,1],RE[:,2]],
+                             [LE[:,0],LE[:,1],RE[:,2]],
+                             [RE[:,0],RE[:,1],LE[:,2]],
+                             [LE[:,0],RE[:,1],RE[:,2]],
+                             [RE[:,0],LE[:,1],LE[:,2]],
+                             [LE[:,0],RE[:,1],LE[:,2]],
+                             [RE[:,0],LE[:,1],RE[:,2]]])
+        # This gives us shape: 8, 3, n_grid
+        D = na.sum(self._norm_vec.reshape((1,3,1)) * vertices, axis=1) + self._d
+        self.D = D
+        self._grids = self.hierarchy.grids[
+            na.where(na.logical_not(na.all(D<0,axis=0) | na.all(D>0,axis=0) )) ]
 
     @cache_mask
     def _get_cut_mask(self, grid):
@@ -3377,8 +3414,6 @@
             The left edge of the region to be extracted
         dims : array_like
             Number of cells along each axis of resulting covering_grid
-        right_edge : array_like, optional
-            The right edge of the region to be extracted
         fields : array_like, optional
             A list of fields that you'd like pre-generated for your object
 
@@ -3537,16 +3572,13 @@
         left_edge : array_like
             The left edge of the region to be extracted
         dims : array_like
-            Number of cells along each axis of resulting covering_grid
-        right_edge : array_like, optional
-            The right edge of the region to be extracted
+            Number of cells along each axis of resulting covering_grid.
         fields : array_like, optional
             A list of fields that you'd like pre-generated for your object
 
         Example
         -------
         cube = pf.h.smoothed_covering_grid(2, left_edge=[0.0, 0.0, 0.0], \
-                                  right_edge=[1.0, 1.0, 1.0],
                                   dims=[128, 128, 128])
         """
         self._base_dx = (
@@ -3585,10 +3617,16 @@
         for gi, grid in enumerate(self._grids):
             if self._use_pbar: pbar.update(gi)
             if grid.Level > last_level and grid.Level <= self.level:
+                mylog.debug("Updating level state to %s", last_level + 1)
                 self._update_level_state(last_level + 1)
                 self._refine(1, fields_to_get)
                 last_level = grid.Level
             self._get_data_from_grid(grid, fields_to_get)
+        while last_level < self.level:
+            mylog.debug("Grid-free refinement %s to %s", last_level, last_level + 1)
+            self._update_level_state(last_level + 1)
+            self._refine(1, fields_to_get)
+            last_level += 1
         if self.level > 0:
             for field in fields_to_get:
                 self[field] = self[field][1:-1,1:-1,1:-1]
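
Of the selectors restored above, the cutting-plane one only needs the sign of each corner's signed distance to the plane, so the 1/sqrt(a^2+b^2+c^2) normalization quoted in its comment can be dropped: a grid is rejected only when all eight of its corners land on the same side. A minimal sketch of that test (an illustrative helper, not yt API):

    import numpy as np

    def plane_crosses_box(norm_vec, d, left_edge, right_edge):
        # Unnormalized signed distance n.x + d at the eight box corners.
        n = np.asarray(norm_vec, dtype=float)
        le = np.asarray(left_edge, dtype=float)
        re = np.asarray(right_edge, dtype=float)
        corners = np.array([np.where(bits, re, le) for bits in np.ndindex(2, 2, 2)])
        D = corners @ n + d
        # The plane misses the box only if every corner sits on one side.
        return not (np.all(D < 0) or np.all(D > 0))

For example, plane_crosses_box([0.0, 0.0, 1.0], -0.5, [0, 0, 0], [1, 1, 1]) returns True, since the z=0 and z=1 corners fall on opposite sides of the plane z = 0.5.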

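The restored ray selector, likewise shown above, evaluates the line start_point + t*vec at each grid face coordinate and keeps grids whose faces the line actually crosses, with `_get_line_at_coord` doing the parametric step. A standalone restatement of that helper (function name and argument layout here are illustrative):

    import numpy as np

    def line_at_coord(start_point, vec, v, index):
        # Solve start[index] + t*vec[index] = v for t, then evaluate the line
        # there; v may be a scalar or an array of per-grid face coordinates.
        start_point, vec = np.asarray(start_point), np.asarray(vec)
        t = (np.asarray(v, dtype=float) - start_point[index]) / vec[index]
        return start_point + np.reshape(t, (-1, 1)) * vec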


https://bitbucket.org/yt_analysis/yt/changeset/ef9173b7076f/
changeset:   ef9173b7076f
branch:      yt
user:        ngoldbaum
date:        2012-06-16 00:54:41
summary:     Merged in MatthewTurk/yt-refactor (pull request #168)
affected #:  110 files
Diff too large to display.

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


