[yt-svn] commit/yt-3.0: 67 new changesets

commits-noreply at bitbucket.org
Thu Oct 10 14:52:41 PDT 2013


67 new commits in yt-3.0:

https://bitbucket.org/yt_analysis/yt-3.0/commits/3ac9490aefbc/
Changeset:   3ac9490aefbc
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-01 16:25:59
Summary:     Making all IO handlers have a reference to the parameter file.
Affected #:  8 files

diff -r 59d6af464edc0cb7d6322e930999bf715e54766f -r 3ac9490aefbc44aebc86a70e426bf406a57f190e yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -198,9 +198,6 @@
         mask[grid_ind] = True
         return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
 
-    def _setup_data_io(self):
-        self.io = io_registry[self.data_style](self.parameter_file)
-
 class ChomboStaticOutput(StaticOutput):
     _hierarchy_class = ChomboHierarchy
     _fieldinfo_fallback = ChomboFieldInfo

diff -r 59d6af464edc0cb7d6322e930999bf715e54766f -r 3ac9490aefbc44aebc86a70e426bf406a57f190e yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -572,10 +572,6 @@
                     result[p] = result[p][0:max_num]
         return result
 
-    def _setup_data_io(self):
-            self.io = io_registry[self.data_style](self.parameter_file)
-
-
 class EnzoHierarchyInMemory(EnzoHierarchy):
 
     grid = EnzoGridInMemory

diff -r 59d6af464edc0cb7d6322e930999bf715e54766f -r 3ac9490aefbc44aebc86a70e426bf406a57f190e yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -199,9 +199,6 @@
                 # Translating an already-converted field
                 self.parameter_file.conversion_factors[field] = 1.0 
                 
-    def _setup_data_io(self):
-        self.io = io_registry[self.data_style](self.parameter_file)
-
 class FLASHStaticOutput(StaticOutput):
     _hierarchy_class = FLASHHierarchy
     _fieldinfo_fallback = FLASHFieldInfo

diff -r 59d6af464edc0cb7d6322e930999bf715e54766f -r 3ac9490aefbc44aebc86a70e426bf406a57f190e yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -170,9 +170,6 @@
         mask[grid_ind] = True
         return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
 
-    def _setup_data_io(self):
-        self.io = io_registry[self.data_style](self.parameter_file)
-
 class GDFStaticOutput(StaticOutput):
     _hierarchy_class = GDFHierarchy
     _fieldinfo_fallback = GDFFieldInfo

diff -r 59d6af464edc0cb7d6322e930999bf715e54766f -r 3ac9490aefbc44aebc86a70e426bf406a57f190e yt/frontends/moab/data_structures.py
--- a/yt/frontends/moab/data_structures.py
+++ b/yt/frontends/moab/data_structures.py
@@ -68,9 +68,6 @@
     def _count_grids(self):
         self.num_grids = 1 #self._fhandle['/grid_parent_id'].shape[0]
 
-    def _setup_data_io(self):
-        self.io = io_registry[self.data_style](self.parameter_file)
-
 class MoabHex8StaticOutput(StaticOutput):
     _hierarchy_class = MoabHex8Hierarchy
     _fieldinfo_fallback = MoabFieldInfo

diff -r 59d6af464edc0cb7d6322e930999bf715e54766f -r 3ac9490aefbc44aebc86a70e426bf406a57f190e yt/frontends/pluto/data_structures.py
--- a/yt/frontends/pluto/data_structures.py
+++ b/yt/frontends/pluto/data_structures.py
@@ -168,9 +168,6 @@
         mask[grid_ind] = True
         return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
 
-    def _setup_data_io(self):
-        self.io = io_registry[self.data_style](self.parameter_file)
-
 class PlutoStaticOutput(StaticOutput):
     _hierarchy_class = PlutoHierarchy
     _fieldinfo_fallback = PlutoFieldInfo

diff -r 59d6af464edc0cb7d6322e930999bf715e54766f -r 3ac9490aefbc44aebc86a70e426bf406a57f190e yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -329,7 +329,7 @@
 
     def _setup_data_io(self):
         if getattr(self, "io", None) is not None: return
-        self.io = io_registry[self.data_style]()
+        self.io = io_registry[self.data_style](self.parameter_file)
 
     def _save_data(self, array, node, name, set_attr=None, force=False, passthrough = False):
         """

diff -r 59d6af464edc0cb7d6322e930999bf715e54766f -r 3ac9490aefbc44aebc86a70e426bf406a57f190e yt/utilities/io_handler.py
--- a/yt/utilities/io_handler.py
+++ b/yt/utilities/io_handler.py
@@ -36,8 +36,9 @@
             if hasattr(cls, "_data_style"):
                 io_registry[cls._data_style] = cls
 
-    def __init__(self):
+    def __init__(self, pf):
         self.queue = defaultdict(dict)
+        self.pf = pf
 
     # We need a function for reading a list of sets
     # and a function for *popping* from a queue all the appropriate sets

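The net effect of this changeset: every IO handler is now constructed with the
parameter file, so frontends can drop their copy-pasted _setup_data_io
overrides and rely on the single base-class version. A rough sketch of the
resulting pattern (MyIOHandler and "my_style" are placeholder names):

    from yt.utilities.io_handler import BaseIOHandler, io_registry

    class MyIOHandler(BaseIOHandler):
        _data_style = "my_style"

        def _read_particle_fields(self, chunks, ptf, selector):
            # Handlers can now consult the parameter file directly,
            # e.g. self.pf["Massarr"], instead of having it threaded
            # through every call.
            ...

    # The one shared implementation in the geometry handler:
    def _setup_data_io(self):
        if getattr(self, "io", None) is not None: return
        self.io = io_registry[self.data_style](self.parameter_file)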

https://bitbucket.org/yt_analysis/yt-3.0/commits/7cf88aae8d98/
Changeset:   7cf88aae8d98
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-01 20:19:36
Summary:     Starting a refactor of particle concatenation into the base class.
Affected #:  4 files

diff -r 3ac9490aefbc44aebc86a70e426bf406a57f190e -r 7cf88aae8d98592091f9ab272a3ffa2b881df206 yt/frontends/sph/definitions.py
--- a/yt/frontends/sph/definitions.py
+++ b/yt/frontends/sph/definitions.py
@@ -0,0 +1,5 @@
+
+gadget_ptypes = ("Gas", "Halo", "Disk", "Bulge", "Stars", "Bndry")
+ghdf5_ptypes  = ("PartType0", "PartType1", "PartType2", "PartType3",
+                 "PartType4", "PartType5")
+

diff -r 3ac9490aefbc44aebc86a70e426bf406a57f190e -r 7cf88aae8d98592091f9ab272a3ffa2b881df206 yt/frontends/sph/fields.py
--- a/yt/frontends/sph/fields.py
+++ b/yt/frontends/sph/fields.py
@@ -28,6 +28,9 @@
     NullFunc, \
     TranslationFunc
 import yt.data_objects.universal_fields
+from .definitions import \
+    gadget_ptypes, \
+    ghdf5_ptypes
 from yt.data_objects.particle_fields import \
     particle_deposition_functions, \
     particle_scalar_functions, \
@@ -105,24 +108,9 @@
 
     # This has to be done manually for Gadget, because some of the particles will
     # have uniform mass
-    def _gadget_particle_fields(ptype):
-        def _Mass(field, data):
-            pind = ptypes.index(ptype)
-            if data.pf["Massarr"][pind] == 0.0:
-                return data[ptype, "Masses"].copy()
-            mass = np.ones(data[ptype, "ParticleIDs"].shape[0], dtype="float64")
-            # Note that this is an alias, which is why we need to apply conversion
-            # here.  Otherwise we'd have an asymmetry.
-            mass *= data.pf["Massarr"][pind] * data.convert("mass")
-            return mass
-        field_registry.add_field((ptype, "Mass"), function=_Mass,
-                                  particle_type = True)
-
     for fname in ["Coordinates", "Velocities", "ParticleIDs",
-                  # Note: Mass, not Masses
-                  "Mass", "particle_index"]:
-        func = _field_concat(fname)
-        field_registry.add_field(("all", fname), function=func,
+                  "Masses", "Mass", "particle_index"]:
+        field_registry.add_field(("all", fname), function=NullFunc,
                 particle_type = True)
 
     for ptype in ptypes:
@@ -130,12 +118,11 @@
             particle_type = True,
             convert_function=_get_conv("mass"),
             units = r"\mathrm{g}")
-        _gadget_particle_fields(ptype)
         known_registry.add_field((ptype, "Velocities"), function=NullFunc,
             particle_type = True,
             convert_function=_get_conv("velocity"),
             units = r"\mathrm{cm}/\mathrm{s}")
-        particle_deposition_functions(ptype, "Coordinates", "Mass", field_registry)
+        particle_deposition_functions(ptype, "Coordinates", "Masses", field_registry)
         particle_scalar_functions(ptype, "Coordinates", "Velocities", field_registry)
         known_registry.add_field((ptype, "Coordinates"), function=NullFunc,
             particle_type = True)
@@ -143,7 +130,7 @@
         GadgetFieldInfo.add_field( (ptype, "particle_index"),
             function = TranslationFunc((ptype, "ParticleIDs")),
             particle_type = True)
-    particle_deposition_functions("all", "Coordinates", "Mass", field_registry)
+    particle_deposition_functions("all", "Coordinates", "Masses", field_registry)
 
     # Now we have to manually apply the splits for "all", since we don't want to
     # use the splits defined above.
@@ -156,14 +143,10 @@
                     particle_type = True)
 
 # Note that we call the same function a few times here.
-_gadget_ptypes = ("Gas", "Halo", "Disk", "Bulge", "Stars", "Bndry")
-_ghdf5_ptypes  = ("PartType0", "PartType1", "PartType2", "PartType3",
-                  "PartType4", "PartType5")
-
-_setup_gadget_fields(_gadget_ptypes,
+_setup_gadget_fields(gadget_ptypes,
     GadgetFieldInfo,
     KnownGadgetFields)
-_setup_gadget_fields(_ghdf5_ptypes,
+_setup_gadget_fields(ghdf5_ptypes,
     GadgetHDF5FieldInfo,
     KnownGadgetHDF5Fields)
 

diff -r 3ac9490aefbc44aebc86a70e426bf406a57f190e -r 7cf88aae8d98592091f9ab272a3ffa2b881df206 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -16,6 +16,7 @@
 
 import h5py
 import numpy as np
+from .definitions import gadget_ptypes, ghdf5_ptypes
 from yt.funcs import *
 from yt.utilities.exceptions import *
 
@@ -29,8 +30,6 @@
 
 CHUNKSIZE = 10000000
 
-_vector_fields = ("Coordinates", "Velocity", "Velocities")
-
 def _get_h5_handle(fn):
     try:
         f = h5py.File(fn, "r")
@@ -45,23 +44,27 @@
 
 class IOHandlerOWLS(BaseIOHandler):
     _data_style = "OWLS"
+    _vector_fields = ("Coordinates", "Velocity", "Velocities")
+    _known_ptypes = ghdf5_ptypes
+    _var_mass = None
+
+    @property
+    def var_mass(self):
+        if self._var_mass is None:
+            vm = []
+            for i, v in enumerate(self.pf["Massarr"]):
+                if v == 0:
+                    vm.append(self._known_ptypes[i])
+            self._var_mass = tuple(vm)
+        return self._var_mass
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
         raise NotImplementedError
 
-    def _read_particle_selection(self, chunks, selector, fields):
+    def _read_particle_coords(self, chunks, ptf):
+        # This will read chunks and yield the results.
         rv = {}
-        # We first need a set of masks for each particle type
-        ptf = defaultdict(list)
-        psize = defaultdict(lambda: 0)
         chunks = list(chunks)
-        for ftype, fname in fields:
-            ptf[ftype].append(fname)
-        # For this type of file, we actually have something slightly different.
-        # We are given a list of ParticleDataChunks, which is composed of
-        # individual ParticleOctreeSubsets.  The data_files attribute on these
-        # may in fact overlap.  So we will iterate over a union of all the
-        # data_files.
         data_files = set([])
         for chunk in chunks:
             for obj in chunk.objs:
@@ -70,21 +73,18 @@
             f = _get_h5_handle(data_file.filename)
             # This double-reads
             for ptype, field_list in sorted(ptf.items()):
-                coords = f["/%s/Coordinates" % ptype][:].astype("float64")
-                psize[ptype] += selector.count_points(
-                    coords[:,0], coords[:,1], coords[:,2])
-                del coords
+                x = f["/%s/Coordinates" % ptype][:,0].astype("float64")
+                y = f["/%s/Coordinates" % ptype][:,1].astype("float64")
+                z = f["/%s/Coordinates" % ptype][:,2].astype("float64")
+                yield ptype, (x, y, z)
             f.close()
+
+    def _read_particle_fields(self, chunks, ptf, selector):
         # Now we have all the sizes, and we can allocate
-        ind = {}
-        for field in fields:
-            mylog.debug("Allocating %s values for %s", psize[field[0]], field)
-            if field[1] in _vector_fields:
-                shape = (psize[field[0]], 3)
-            else:
-                shape = psize[field[0]]
-            rv[field] = np.empty(shape, dtype="float64")
-            ind[field] = 0
+        data_files = set([])
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
         for data_file in data_files:
             f = _get_h5_handle(data_file.filename)
             for ptype, field_list in sorted(ptf.items()):
@@ -95,14 +95,14 @@
                 del coords
                 if mask is None: continue
                 for field in field_list:
-                    data = g[field][:][mask,...]
-                    my_ind = ind[ptype, field]
-                    mylog.debug("Filling from %s to %s with %s",
-                        my_ind, my_ind+data.shape[0], field)
-                    rv[ptype, field][my_ind:my_ind + data.shape[0],...] = data
-                    ind[ptype, field] += data.shape[0]
+                    if field == "Masses" and ptype not in self.var_mass:
+                        data = np.empty(mask.sum(), dtype="float64")
+                        ind = self._known_ptypes.index(ptype) 
+                        data[:] = self.pf["Massarr"][ind]
+                    else:
+                        data = g[field][:][mask,...]
+                    yield (ptype, field), data
             f.close()
-        return rv
 
     def _initialize_index(self, data_file, regions):
         f = _get_h5_handle(data_file.filename)
@@ -145,6 +145,9 @@
                 if not hasattr(g[k], "shape"): continue
                 # str => not unicode!
                 fields.append((ptype, str(k)))
+            if "Masses" not in g.keys():
+                # We'll append it anyway.
+                fields.append((ptype, "Masses"))
         f.close()
         return fields
 
@@ -155,6 +158,7 @@
 
 class IOHandlerGadgetBinary(BaseIOHandler):
     _data_style = "gadget_binary"
+    _vector_fields = ("Coordinates", "Velocity", "Velocities")
 
     # Particle types (Table 3 in GADGET-2 user guide)
     _ptypes = ( "Gas",
@@ -337,6 +341,7 @@
 
 class IOHandlerTipsyBinary(BaseIOHandler):
     _data_style = "tipsy"
+    _vector_fields = ("Coordinates", "Velocity", "Velocities")
 
     _pdtypes = None # dtypes, to be filled in later
 

diff -r 3ac9490aefbc44aebc86a70e426bf406a57f190e -r 7cf88aae8d98592091f9ab272a3ffa2b881df206 yt/utilities/io_handler.py
--- a/yt/utilities/io_handler.py
+++ b/yt/utilities/io_handler.py
@@ -16,17 +16,19 @@
 from collections import defaultdict
 
 import yt.utilities.lib as au
+from yt.funcs import mylog
 import exceptions
 import cPickle
 import os
 import h5py
+import numpy as np
 
 _axis_ids = {0:2,1:1,2:0}
 
 io_registry = {}
 
 class BaseIOHandler(object):
-
+    _vector_fields = ()
     _data_style = None
     _particle_reader = False
 
@@ -112,6 +114,62 @@
     def _read_chunk_data(self, chunk, fields):
         return None
 
+    def _read_particle_selection(self, chunks, selector, fields):
+        rv = {}
+        ind = {}
+        # We first need a set of masks for each particle type
+        ptf = defaultdict(list)        # ON-DISK TO READ
+        psize = defaultdict(lambda: 0) # COUNT PTYPES ON DISK
+        fsize = defaultdict(lambda: 0) # COUNT RV
+        field_maps = defaultdict(list) # ptypes -> fields
+        chunks = list(chunks)
+        # What we need is a mapping from particle types to return types
+        ondisk_ptypes = [pt for pt in self.pf.particle_types
+                         if pt not in self.pf.known_filters
+                         and pt != "all"]
+        for field in fields:
+            ftype, fname = field
+            fsize[field] = 0
+            # We should add a check for pf.particle_unions or something here
+            if ftype == "all":
+                for pt in ondisk_ptypes:
+                    ptf[pt].append(fname)
+                    field_maps[pt, fname].append(field)
+            else:
+                ptf[ftype].append(fname)
+                field_maps[field].append(field)
+        # Now we have our full listing.
+        # Here, ptype_map means which particles contribute to a given type.
+        # And ptf is the actual fields from disk to read.
+        for ptype, (x, y, z) in self._read_particle_coords(chunks, ptf):
+            psize[ptype] += selector.count_points(x, y, z)
+        # Now we allocate
+        # ptf, remember, is our mapping of what we want to read
+        #for ptype in ptf:
+        for field in fields:
+            if field[0] == "all":
+                for pt in ondisk_ptypes:
+                    fsize[field] += psize[pt]
+            else:
+                fsize[field] += psize[field[0]]
+        for field in fields:
+            if field[1] in self._vector_fields:
+                shape = (fsize[field], 3)
+            else:
+                shape = (fsize[field], )
+            rv[field] = np.empty(shape, dtype="float64")
+            ind[field] = 0
+        # Now we read.
+        for field_r, vals in self._read_particle_fields(chunks, ptf, selector):
+            # Note that we now need to check the mappings
+            for field_f in field_maps[field_r]:
+                my_ind = ind[field_f]
+                mylog.debug("Filling %s from %s to %s with %s",
+                    field_f, my_ind, my_ind+vals.shape[0], field_r)
+                rv[field_f][my_ind:my_ind + vals.shape[0],...] = vals
+                ind[field_f] += vals.shape[0]
+        return rv
+
 class IOHandlerExtracted(BaseIOHandler):
 
     _data_style = 'extracted'

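To make the new split concrete: _read_particle_selection in the base class now
does the counting, allocation, and "all"-union bookkeeping, and a frontend only
supplies two generators. A minimal sketch of such a handler, assuming an
in-memory layout of {ptype: {fname: array}} (the class name and data_style
below are hypothetical, and selector.select_points is assumed to return a
boolean mask or None):

    import numpy as np
    from yt.utilities.io_handler import BaseIOHandler

    class IOHandlerDictParticles(BaseIOHandler):
        _data_style = "dict_particles"
        _vector_fields = ("Coordinates",)

        def __init__(self, pf, data):
            BaseIOHandler.__init__(self, pf)
            self.data = data

        def _read_particle_coords(self, chunks, ptf):
            # Pass 1: yield coordinates per particle type so the base
            # class can count selected points and size its buffers.
            for ptype in sorted(ptf):
                pos = self.data[ptype]["Coordinates"]
                yield ptype, (pos[:, 0], pos[:, 1], pos[:, 2])

        def _read_particle_fields(self, chunks, ptf, selector):
            # Pass 2: yield ((ptype, fname), values) for the selected
            # particles; the base class scatters them into rv.
            for ptype, field_list in sorted(ptf.items()):
                pos = self.data[ptype]["Coordinates"]
                mask = selector.select_points(pos[:, 0], pos[:, 1], pos[:, 2])
                if mask is None:
                    continue
                for field in field_list:
                    yield (ptype, field), self.data[ptype][field][mask]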

https://bitbucket.org/yt_analysis/yt-3.0/commits/25c608e9d9c7/
Changeset:   25c608e9d9c7
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-01 21:02:32
Summary:     Velocity/Velocities switch, fixing Rockstar error, and fixing particle_index.
Affected #:  3 files

diff -r 59d6af464edc0cb7d6322e930999bf715e54766f -r 25c608e9d9c75abba799ade2a8b92a9b8789d239 yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -233,7 +233,8 @@
             pmass_min, pmass_max = dd.quantities["Extrema"](
                 (ptype, "ParticleMassMsun"), non_zero = True)[0]
             if pmass_min != pmass_max:
-                raise YTRockstarMultiMassNotSupported
+                raise YTRockstarMultiMassNotSupported(pmass_min, pmass_max,
+                    ptype)
             particle_mass = pmass_min
         # NOTE: We want to take our Msun and turn it into Msun/h .  Its value
         # should be such that dividing by little h gives the original value.
@@ -244,6 +245,7 @@
             # Get total_particles in parallel.
             tp = dd.quantities['TotalQuantity']((ptype, "particle_ones"))[0]
             p['total_particles'] = int(tp)
+            mylog.warning("Total Particle Count: %0.3e", int(tp))
         p['left_edge'] = tpf.domain_left_edge
         p['right_edge'] = tpf.domain_right_edge
         p['center'] = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0

diff -r 59d6af464edc0cb7d6322e930999bf715e54766f -r 25c608e9d9c75abba799ade2a8b92a9b8789d239 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -301,7 +301,8 @@
                 return self[item]
         elif finfo is not None and finfo.particle_type:
             if item == "Coordinates" or item[1] == "Coordinates" or \
-               item == "Velocities" or item[1] == "Velocities":
+               item == "Velocities" or item[1] == "Velocities" or \
+               item == "Velocity" or item[1] == "Velocity":
                 # A vector
                 self[item] = np.ones((self.NumberOfParticles, 3))
             else:

diff -r 59d6af464edc0cb7d6322e930999bf715e54766f -r 25c608e9d9c75abba799ade2a8b92a9b8789d239 yt/frontends/sph/fields.py
--- a/yt/frontends/sph/fields.py
+++ b/yt/frontends/sph/fields.py
@@ -140,7 +140,7 @@
         known_registry.add_field((ptype, "Coordinates"), function=NullFunc,
             particle_type = True)
         # Now we add some translations.
-        GadgetFieldInfo.add_field( (ptype, "particle_index"),
+        field_registry.add_field( (ptype, "particle_index"),
             function = TranslationFunc((ptype, "ParticleIDs")),
             particle_type = True)
     particle_deposition_functions("all", "Coordinates", "Mass", field_registry)
@@ -177,9 +177,9 @@
 _owls_ptypes = ("PartType0", "PartType1", "PartType2", "PartType3",
                 "PartType4", "PartType5")
 
-for fname in ["Coordinates", "Velocities", "ParticleIDs",
+for fname in ["Coordinates", "Velocity", "ParticleIDs",
               # Note: Mass, not Masses
-              "Mass"]:
+              "Mass", "particle_index"]:
     func = _field_concat(fname)
     OWLSFieldInfo.add_field(("all", fname), function=func,
             particle_type = True)
@@ -207,21 +207,24 @@
         convert_function=_get_conv("mass"),
         units = r"\mathrm{g}")
     _owls_particle_fields(ptype)
-    KnownOWLSFields.add_field((ptype, "Velocities"), function=NullFunc,
+    KnownOWLSFields.add_field((ptype, "Velocity"), function=NullFunc,
         particle_type = True,
         convert_function=_get_conv("velocity"),
         units = r"\mathrm{cm}/\mathrm{s}")
     particle_deposition_functions(ptype, "Coordinates", "Mass", OWLSFieldInfo)
-    particle_scalar_functions(ptype, "Coordinates", "Velocities", OWLSFieldInfo)
+    particle_scalar_functions(ptype, "Coordinates", "Velocity", OWLSFieldInfo)
     KnownOWLSFields.add_field((ptype, "Coordinates"), function=NullFunc,
         particle_type = True)
+    OWLSFieldInfo.add_field( (ptype, "particle_index"),
+        function = TranslationFunc((ptype, "ParticleIDs")),
+        particle_type = True)
 particle_deposition_functions("all", "Coordinates", "Mass", OWLSFieldInfo)
 
 # Now we have to manually apply the splits for "all", since we don't want to
 # use the splits defined above.
 
 for iname, oname in [("Coordinates", "particle_position_"),
-                     ("Velocities", "particle_velocity_")]:
+                     ("Velocity", "particle_velocity_")]:
     for axi, ax in enumerate("xyz"):
         func = _field_concat_slice(iname, axi)
         OWLSFieldInfo.add_field(("all", oname + ax), function=func,

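Two small patterns are worth pulling out of this changeset. First, the
field_info_container fix treats "Velocity" exactly like "Velocities" and
"Coordinates" when sizing dummy particle data; in plain Python the check
amounts to this (a paraphrase, not the literal code):

    VECTOR_NAMES = ("Coordinates", "Velocities", "Velocity")

    def _is_vector(item):
        # item is either a plain field name or a (ptype, fname) tuple.
        return item in VECTOR_NAMES or \
            (isinstance(item, tuple) and item[1] in VECTOR_NAMES)

Second, particle_index is aliased per particle type through TranslationFunc,
so the same registration now works for both the Gadget and OWLS registries:

    field_registry.add_field((ptype, "particle_index"),
        function=TranslationFunc((ptype, "ParticleIDs")),
        particle_type=True)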

https://bitbucket.org/yt_analysis/yt-3.0/commits/4f0f69d5b2b7/
Changeset:   4f0f69d5b2b7
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-02 23:45:43
Summary:     Merging to get some 3.0 API fixes
Affected #:  6 files

diff -r 25c608e9d9c75abba799ade2a8b92a9b8789d239 -r 4f0f69d5b2b78b8d6827ce3480c97350fc529418 yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -637,7 +637,7 @@
         """
 
         sphere = self.pf.h.sphere(halo['center'], halo['r_max']/self.pf.units['mpc'])
-        if len(sphere._grids) == 0: return None
+        #if len(sphere._grids) == 0: return None
         new_sphere = False
 
         if self.recenter:
@@ -819,8 +819,8 @@
                 for hp in self.projection_fields:
                     projections.append(self.pf.h.proj(hp['field'], w,
                                                       weight_field=hp['weight_field'],
-                                                      source=region, center=halo['center'],
-                                                      serialize=False))
+                                                      data_source=region,
+                                                      center=halo['center']))
 
                 # Set x and y limits, shift image if it overlaps domain boundary.
                 if need_per:

diff -r 25c608e9d9c75abba799ade2a8b92a9b8789d239 -r 4f0f69d5b2b78b8d6827ce3480c97350fc529418 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -213,7 +213,7 @@
     _key_fields = YTSelectionContainer2D._key_fields + ['weight_field']
     _type_name = "proj"
     _con_args = ('axis', 'weight_field')
-    _container_fields = ('px', 'py', 'pdx', 'pdy')
+    _container_fields = ('px', 'py', 'pdx', 'pdy', 'weight_field')
     def __init__(self, field, axis, weight_field = None,
                  center = None, pf = None, data_source=None, 
                  style = "integrate", field_parameters = None):

diff -r 25c608e9d9c75abba799ade2a8b92a9b8789d239 -r 4f0f69d5b2b78b8d6827ce3480c97350fc529418 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -241,12 +241,40 @@
                 # Note that we're adding *grids*, not individual cells.
                 if level >= min_level:
                     assert(pos.shape[0] == ng)
-                    n = self.oct_handler.add(cpu + 1, level - min_level, pos)
-                    assert(n == ng)
+                    n = self.oct_handler.add(cpu + 1, level - min_level, pos,
+                                count_boundary = 1)
+                    self._error_check(cpu, level, pos, n, ng, (nx, ny, nz))
                     if n > 0: max_level = max(level - min_level, max_level)
         self.max_level = max_level
         self.oct_handler.finalize()
 
+    def _error_check(self, cpu, level, pos, n, ng, nn):
+        # NOTE: We have the second conditional here because internally, it will
+        # not add any octs in that case.
+        if n == ng or cpu + 1 > self.oct_handler.num_domains:
+            return
+        # This is where we now check for issues with creating the new octs, and
+        # we attempt to determine what precisely is going wrong.
+        # These are all print statements.
+        print "We have detected an error with the construction of the Octree."
+        print "  The number of Octs to be added :  %s" % ng
+        print "  The number of Octs added       :  %s" % n
+        print "  Level                          :  %s" % level
+        print "  CPU Number (0-indexed)         :  %s" % cpu
+        for i, ax in enumerate('xyz'):
+            print "  extent [%s]                     :  %s %s" % \
+            (ax, pos[:,i].min(), pos[:,i].max())
+        print "  domain left                    :  %s" % \
+            (self.pf.domain_left_edge,)
+        print "  domain right                   :  %s" % \
+            (self.pf.domain_right_edge,)
+        print "  offset applied                 :  %s %s %s" % \
+            (nn[0], nn[1], nn[2])
+        print "AMR Header:"
+        for key in sorted(self.amr_header):
+            print "   %-30s: %s" % (key, self.amr_header[key])
+        raise RuntimeError
+
     def included(self, selector):
         if getattr(selector, "domain_id", None) is not None:
             return selector.domain_id == self.domain_id

diff -r 25c608e9d9c75abba799ade2a8b92a9b8789d239 -r 4f0f69d5b2b78b8d6827ce3480c97350fc529418 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -499,8 +499,10 @@
     @cython.cdivision(True)
     def add(self, int curdom, int curlevel,
             np.ndarray[np.float64_t, ndim=2] pos,
-            int skip_boundary = 1):
+            int skip_boundary = 1,
+            int count_boundary = 0):
         cdef int level, no, p, i, j, k, ind[3]
+        cdef int nb = 0
         cdef Oct *cur, *next = NULL
         cdef np.float64_t pp[3], cp[3], dds[3]
         no = pos.shape[0] #number of octs
@@ -520,7 +522,9 @@
                 cp[i] = (ind[i] + 0.5) * dds[i] + self.DLE[i]
                 if ind[i] < 0 or ind[i] >= self.nn[i]:
                     in_boundary = 1
-            if skip_boundary == in_boundary == 1: continue
+            if skip_boundary == in_boundary == 1:
+                nb += count_boundary
+                continue
             cur = self.next_root(curdom, ind)
             if cur == NULL: raise RuntimeError
             # Now we find the location we want
@@ -542,7 +546,7 @@
             # Now we should be at the right level
             cur.domain = curdom
             cur.file_ind = p
-        return cont.n_assigned - initial
+        return cont.n_assigned - initial + nb
 
     def allocate_domains(self, domain_counts):
         cdef int count, i

diff -r 25c608e9d9c75abba799ade2a8b92a9b8789d239 -r 4f0f69d5b2b78b8d6827ce3480c97350fc529418 yt/visualization/plot_collection.py
--- a/yt/visualization/plot_collection.py
+++ b/yt/visualization/plot_collection.py
@@ -416,8 +416,8 @@
             coord = center[axis]
         if obj is None:
             if field_parameters is None: field_parameters = {}
-            obj = self.pf.hierarchy.slice(axis, coord, field,
-                            center=center, **field_parameters)
+            obj = self.pf.hierarchy.slice(axis, coord, center=center,
+                            field_parameters = field_parameters)
         p = self._add_plot(PCSlicePlot(
                          obj, field, use_colorbar=use_colorbar,
                          axes=axes, figure=figure,
@@ -577,8 +577,8 @@
             center = self.c
         if not obj:
             if field_parameters is None: field_parameters = {}
-            cp = self.pf.hierarchy.cutting(normal, center, field,
-                    **field_parameters)
+            cp = self.pf.hierarchy.cutting(normal, center, 
+                    field_parameters = field_parameters)
         else:
             cp = obj
         p = self._add_plot(CuttingPlanePlot(cp, field,
@@ -676,8 +676,8 @@
             center = self.c
         if obj is None:
             obj = self.pf.hierarchy.proj(field, axis, weight_field,
-                                         source = data_source, center=center,
-                                         **field_parameters)
+                                         data_source = data_source, center=center,
+                                         field_parameters = field_parameters)
         p = self._add_plot(PCProjectionPlot(obj, field,
                          use_colorbar=use_colorbar, axes=axes, figure=figure,
                          size=fig_size, periodic=periodic))
@@ -770,8 +770,8 @@
         RE[axis] += thickness/2.0
         region = self.pf.h.region(center, LE, RE)
         obj = self.pf.hierarchy.proj(field, axis, weight_field,
-                                     source = region, center=center,
-                                     **field_parameters)
+                                     data_source = region, center=center,
+                                     field_parameters = field_parameters)
         p = self._add_plot(PCProjectionPlot(obj, field,
                          use_colorbar=use_colorbar, axes=axes, figure=figure,
                          size=fig_size, periodic=periodic))
@@ -1326,8 +1326,8 @@
         axis = fix_axis(axis)
         if field_parameters is None: field_parameters = {}
         if plot_options is None: plot_options = {}
-        data_source = self.pf.h.ortho_ray(axis, coords, field,
-                        **field_parameters)
+        data_source = self.pf.h.ortho_ray(axis, coords, 
+                        field_parameters = field_parameters)
         p = self._add_plot(LineQueryPlot(data_source,
                 [axis_names[axis], field], self._get_new_id(),
                 figure=figure, axes=axes, plot_options=plot_options))
@@ -1386,8 +1386,8 @@
         """
         if field_parameters is None: field_parameters = {}
         if plot_options is None: plot_options = {}
-        data_source = self.pf.h.ray(start_point, end_point, field,
-                                    **field_parameters)
+        data_source = self.pf.h.ray(start_point, end_point, 
+                                    field_parameters = field_parameters)
         p = self._add_plot(LineQueryPlot(data_source,
                 ['t', field], self._get_new_id(),
                 figure=figure, axes=axes, plot_options=plot_options))

diff -r 25c608e9d9c75abba799ade2a8b92a9b8789d239 -r 4f0f69d5b2b78b8d6827ce3480c97350fc529418 yt/visualization/plot_types.py
--- a/yt/visualization/plot_types.py
+++ b/yt/visualization/plot_types.py
@@ -488,7 +488,7 @@
         if self.datalabel is None:
             field_name = self.axis_names["Z"]
             proj = "Proj" in self._type_name and \
-                   self.data._weight is None
+                   self.data.weight_field is None
             data_label = self.pf.field_info[field_name].get_label(proj)
         else: data_label = self.datalabel
         if self.colorbar != None:
@@ -576,8 +576,8 @@
 
     def _generate_prefix(self, prefix):
         VMPlot._generate_prefix(self, prefix)
-        if self.data._weight is not None:
-            self.prefix += "_%s" % (self.data._weight)
+        if self.data.weight_field is not None:
+            self.prefix += "_%s" % (self.data.weight_field)
 
 class PCProjectionPlotNaturalNeighbor(NNVMPlot, PCProjectionPlot):
     _type_name = "NNProj"

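For callers, the merged API fixes boil down to two keyword changes: slices,
cutting planes, and rays no longer take a field positionally, and field
parameters travel as an explicit dict rather than as **kwargs. A hedged
before/after ("Density", region, c, and fp are placeholders):

    # Old style:
    #   proj = pf.h.proj("Density", 0, None, source=region, **fp)
    #   slc  = pf.h.slice(0, 0.5, "Density", center=c, **fp)
    # New style after this merge -- source= becomes data_source=, and
    # field parameters are passed as a dict:
    proj = pf.h.proj("Density", 0, weight_field=None,
                     data_source=region, center=c, field_parameters=fp)
    # The field is chosen at plot/access time, not at construction:
    slc = pf.h.slice(0, 0.5, center=c, field_parameters=fp)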

https://bitbucket.org/yt_analysis/yt-3.0/commits/5cce4557a832/
Changeset:   5cce4557a832
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-03 18:19:06
Summary:     Merging from the stream_octree bookmark.
Affected #:  9 files

diff -r 4f0f69d5b2b78b8d6827ce3480c97350fc529418 -r 5cce4557a832a7457f6057c51003a9d26c010b50 yt/frontends/stream/api.py
--- a/yt/frontends/stream/api.py
+++ b/yt/frontends/stream/api.py
@@ -22,6 +22,7 @@
       load_amr_grids, \
       load_particles, \
       load_hexahedral_mesh, \
+      load_octree, \
       refine_amr
 
 from .fields import \

diff -r 4f0f69d5b2b78b8d6827ce3480c97350fc529418 -r 5cce4557a832a7457f6057c51003a9d26c010b50 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -21,12 +21,24 @@
 from yt.utilities.io_handler import io_registry
 from yt.funcs import *
 from yt.config import ytcfg
+from yt.data_objects.data_containers import \
+    YTFieldData, \
+    YTDataContainer, \
+    YTSelectionContainer
 from yt.data_objects.grid_patch import \
     AMRGridPatch
+from yt.geometry.geometry_handler import \
+    YTDataChunk
 from yt.geometry.grid_geometry_handler import \
     GridGeometryHandler
+from yt.data_objects.octree_subset import \
+    OctreeSubset
+from yt.geometry.oct_geometry_handler import \
+    OctreeGeometryHandler
 from yt.geometry.particle_geometry_handler import \
     ParticleGeometryHandler
+from yt.geometry.oct_container import \
+    OctreeContainer
 from yt.geometry.unstructured_mesh_handler import \
            UnstructuredGeometryHandler
 from yt.data_objects.static_output import \
@@ -973,3 +985,321 @@
 
     return spf
 
+class StreamOctreeSubset(OctreeSubset):
+    domain_id = 1
+    _domain_offset = 1
+    _num_zones = 2
+
+    def __init__(self, base_region, pf, oct_handler):
+        self.field_data = YTFieldData()
+        self.field_parameters = {}
+        self.pf = pf
+        self.hierarchy = self.pf.hierarchy
+        self.oct_handler = oct_handler
+        self._last_mask = None
+        self._last_selector_id = None
+        self._current_particle_type = 'all'
+        self._current_fluid_type = self.pf.default_fluid_type
+        self.base_region = base_region
+        self.base_selector = base_region.selector
+
+    def fill(self, content, dest, selector, offset):
+        # Here we get a copy of the file, which we skip through and read the
+        # bits we want.
+        oct_handler = self.oct_handler
+        cell_count = selector.count_oct_cells(self.oct_handler, self.domain_id)
+        levels, cell_inds, file_inds = self.oct_handler.file_index_octs(
+            selector, self.domain_id, cell_count)
+        levels[:] = 0
+        dest.update((field, np.empty(cell_count, dtype="float64"))
+                    for field in content)
+        # Make references ...
+        count = oct_handler.fill_level(0, levels, cell_inds, file_inds, 
+                                       dest, content, offset)
+        return count
+
+class StreamOctreeHandler(OctreeGeometryHandler):
+
+    def __init__(self, pf, data_style = None):
+        self.stream_handler = pf.stream_handler
+        self.data_style = data_style
+        super(StreamOctreeHandler, self).__init__(pf, data_style)
+
+    def _setup_data_io(self):
+        if self.stream_handler.io is not None:
+            self.io = self.stream_handler.io
+        else:
+            self.io = io_registry[self.data_style](self.stream_handler)
+
+    def _initialize_oct_handler(self):
+        header = dict(dims = self.pf.domain_dimensions/2,
+                      left_edge = self.pf.domain_left_edge,
+                      right_edge = self.pf.domain_right_edge,
+                      octree = self.pf.octree_mask)
+        self.oct_handler = OctreeContainer.load_octree(header)
+
+    def _identify_base_chunk(self, dobj):
+        if getattr(dobj, "_chunk_info", None) is None:
+            base_region = getattr(dobj, "base_region", dobj)
+            subset = [StreamOctreeSubset(base_region, self.parameter_file,
+                                         self.oct_handler)]
+            dobj._chunk_info = subset
+        dobj._current_chunk = list(self._chunk_all(dobj))[0]
+
+    def _chunk_all(self, dobj):
+        oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        yield YTDataChunk(dobj, "all", oobjs, None)
+
+    def _chunk_spatial(self, dobj, ngz, sort = None, preload_fields = None):
+        sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        # We actually do not really use the data files except as input to the
+        # ParticleOctreeSubset.
+        # This is where we will perform cutting of the Octree and
+        # load-balancing.  That may require a specialized selector object to
+        # cut based on some space-filling curve index.
+        for i,og in enumerate(sobjs):
+            if ngz > 0:
+                g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
+            else:
+                g = og
+            yield YTDataChunk(dobj, "spatial", [g])
+
+    def _chunk_io(self, dobj, cache = True):
+        oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        for subset in oobjs:
+            yield YTDataChunk(dobj, "io", [subset], None, cache = cache)
+
+    def _setup_classes(self):
+        dd = self._get_data_reader_dict()
+        super(StreamOctreeHandler, self)._setup_classes(dd)
+
+    def _detect_fields(self):
+        self.field_list = list(set(self.stream_handler.get_fields()))
+
+class StreamOctreeStaticOutput(StreamStaticOutput):
+    _hierarchy_class = StreamOctreeHandler
+    _fieldinfo_fallback = StreamFieldInfo
+    _fieldinfo_known = KnownStreamFields
+    _data_style = "stream_octree"
+
+def load_octree(octree_mask, domain_dimensions, data, sim_unit_to_cm,
+                bbox=None, sim_time=0.0, periodicity=(True, True, True)):
+    r"""Load an octree mask into yt.
+
+    Octrees can be saved out by calling save_octree on an OctreeContainer.
+    This enables them to be loaded back in.
+
+    This will initialize an Octree of data.  Note that fluid fields will not
+    work yet, or possibly ever.
+    
+    Parameters
+    ----------
+    octree_mask : np.ndarray[uint8_t]
+        This is a depth-first refinement mask for an Octree.
+    domain_dimensions : array_like
+        These are the domain dimensions of the grid.
+    data : dict
+        A dictionary of 1D arrays.  Note that these must be the size of the
+        number of "False" values in the ``octree_mask``.
+    sim_unit_to_cm : float
+        Conversion factor from simulation units to centimeters
+    bbox : array_like (xdim:zdim, LE:RE), optional
+        Size of computational domain in units sim_unit_to_cm
+    sim_time : float, optional
+        The simulation time in seconds
+    periodicity : tuple of booleans
+        Determines whether the data will be treated as periodic along
+        each axis
+
+    """
+
+    domain_dimensions = np.array(domain_dimensions)
+    nprocs = 1
+    if bbox is None:
+        bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], 'float64')
+    domain_left_edge = np.array(bbox[:, 0], 'float64')
+    domain_right_edge = np.array(bbox[:, 1], 'float64')
+    grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
+
+    sfh = StreamDictFieldHandler()
+    
+    particle_types = set_particle_types(data)
+    
+    sfh.update({0:data})
+    grid_left_edges = domain_left_edge
+    grid_right_edges = domain_right_edge
+    grid_dimensions = domain_dimensions.reshape(nprocs,3).astype("int32")
+
+    # I'm not sure we need any of this.
+    handler = StreamHandler(
+        grid_left_edges,
+        grid_right_edges,
+        grid_dimensions,
+        grid_levels,
+        -np.ones(nprocs, dtype='int64'),
+        np.zeros(nprocs, dtype='int64').reshape(nprocs,1), # Temporary
+        np.zeros(nprocs).reshape((nprocs,1)),
+        sfh,
+        particle_types=particle_types,
+        periodicity=periodicity
+    )
+
+    handler.name = "OctreeData"
+    handler.domain_left_edge = domain_left_edge
+    handler.domain_right_edge = domain_right_edge
+    handler.refine_by = 2
+    handler.dimensionality = 3
+    handler.domain_dimensions = domain_dimensions
+    handler.simulation_time = sim_time
+    handler.cosmology_simulation = 0
+
+    spf = StreamOctreeStaticOutput(handler)
+    spf.octree_mask = octree_mask
+    spf.units["cm"] = sim_unit_to_cm
+    spf.units['1'] = 1.0
+    spf.units["unitary"] = 1.0
+    box_in_mpc = sim_unit_to_cm / mpc_conversion['cm']
+    for unit in mpc_conversion.keys():
+        spf.units[unit] = mpc_conversion[unit] * box_in_mpc
+
+    return spf
+
+def hexahedral_connectivity(xgrid, ygrid, zgrid):
+    nx = len(xgrid)
+    ny = len(ygrid)
+    nz = len(zgrid)
+    coords = np.fromiter(chain.from_iterable(product(xgrid, ygrid, zgrid)),
+                    dtype=np.float64, count = nx*ny*nz*3)
+    coords.shape = (nx*ny*nz, 3)
+    cis = np.fromiter(chain.from_iterable(product([0,1], [0,1], [0,1])),
+                    dtype=np.int64, count = 8*3)
+    cis.shape = (8, 3)
+    cycle = np.fromiter(chain.from_iterable(product(*map(range, (nx-1, ny-1, nz-1)))),
+                    dtype=np.int64, count = (nx-1)*(ny-1)*(nz-1)*3)
+    cycle.shape = ((nx-1)*(ny-1)*(nz-1), 3)
+    off = cis + cycle[:, np.newaxis]
+    connectivity = ((off[:,:,0] * ny) + off[:,:,1]) * nz + off[:,:,2]
+    return coords, connectivity
+
+class StreamHexahedralMesh(SemiStructuredMesh):
+    _connectivity_length = 8
+    _index_offset = 0
+
+class StreamHexahedralHierarchy(UnstructuredGeometryHandler):
+
+    def __init__(self, pf, data_style = None):
+        self.stream_handler = pf.stream_handler
+        super(StreamHexahedralHierarchy, self).__init__(pf, data_style)
+
+    def _initialize_mesh(self):
+        coords = self.stream_handler.fields.pop('coordinates')
+        connec = self.stream_handler.fields.pop('connectivity')
+        self.meshes = [StreamHexahedralMesh(0,
+          self.hierarchy_filename, connec, coords, self)]
+
+    def _setup_data_io(self):
+        if self.stream_handler.io is not None:
+            self.io = self.stream_handler.io
+        else:
+            self.io = io_registry[self.data_style](self.stream_handler)
+
+    def _detect_fields(self):
+        self.field_list = list(set(self.stream_handler.get_fields()))
+
+class StreamHexahedralStaticOutput(StreamStaticOutput):
+    _hierarchy_class = StreamHexahedralHierarchy
+    _fieldinfo_fallback = StreamFieldInfo
+    _fieldinfo_known = KnownStreamFields
+    _data_style = "stream_hexahedral"
+
+def load_hexahedral_mesh(data, connectivity, coordinates,
+                         sim_unit_to_cm, bbox=None,
+                         sim_time=0.0, periodicity=(True, True, True)):
+    r"""Load a hexahedral mesh of data into yt as a
+    :class:`~yt.frontends.stream.data_structures.StreamHandler`.
+
+    This should allow a semistructured grid of data to be loaded directly into
+    yt and analyzed as would any other.  This comes with several caveats:
+        * Units will be incorrect unless the data has already been converted to
+          cgs.
+        * Some functions may behave oddly, and parallelism will be
+          disappointing or non-existent in most cases.
+        * Particles may be difficult to integrate.
+
+    Particle fields are detected as one-dimensional fields. The number of particles
+    is set by the "number_of_particles" key in data.
+    
+    Parameters
+    ----------
+    data : dict
+        This is a dict of numpy arrays, where the keys are the field names.
+        There must only be one.
+    connectivity : array_like
+        This should be of size (N,8) where N is the number of zones.
+    coordinates : array_like
+        This should be of size (M,3) where M is the number of vertices
+        indicated in the connectivity matrix.
+    sim_unit_to_cm : float
+        Conversion factor from simulation units to centimeters
+    bbox : array_like (xdim:zdim, LE:RE), optional
+        Size of computational domain in units sim_unit_to_cm
+    sim_time : float, optional
+        The simulation time in seconds
+    periodicity : tuple of booleans
+        Determines whether the data will be treated as periodic along
+        each axis
+
+    """
+
+    domain_dimensions = np.ones(3, "int32") * 2
+    nprocs = 1
+    if bbox is None:
+        bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], 'float64')
+    domain_left_edge = np.array(bbox[:, 0], 'float64')
+    domain_right_edge = np.array(bbox[:, 1], 'float64')
+    grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
+
+    sfh = StreamDictFieldHandler()
+    
+    particle_types = set_particle_types(data)
+    
+    sfh.update({'connectivity': connectivity,
+                'coordinates': coordinates,
+                0: data})
+    grid_left_edges = domain_left_edge
+    grid_right_edges = domain_right_edge
+    grid_dimensions = domain_dimensions.reshape(nprocs,3).astype("int32")
+
+    # I'm not sure we need any of this.
+    handler = StreamHandler(
+        grid_left_edges,
+        grid_right_edges,
+        grid_dimensions,
+        grid_levels,
+        -np.ones(nprocs, dtype='int64'),
+        np.zeros(nprocs, dtype='int64').reshape(nprocs,1), # Temporary
+        np.zeros(nprocs).reshape((nprocs,1)),
+        sfh,
+        particle_types=particle_types,
+        periodicity=periodicity
+    )
+
+    handler.name = "HexahedralMeshData"
+    handler.domain_left_edge = domain_left_edge
+    handler.domain_right_edge = domain_right_edge
+    handler.refine_by = 2
+    handler.dimensionality = 3
+    handler.domain_dimensions = domain_dimensions
+    handler.simulation_time = sim_time
+    handler.cosmology_simulation = 0
+
+    spf = StreamHexahedralStaticOutput(handler)
+    spf.units["cm"] = sim_unit_to_cm
+    spf.units['1'] = 1.0
+    spf.units["unitary"] = 1.0
+    box_in_mpc = sim_unit_to_cm / mpc_conversion['cm']
+    for unit in mpc_conversion.keys():
+        spf.units[unit] = mpc_conversion[unit] * box_in_mpc
+
+    return spf
+

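A hedged usage sketch for the new load_octree entry point, following the
store_octree convention (1 = oct with children, 0 = leaf, written depth-first;
the mask and field values below are made up for illustration):

    import numpy as np
    from yt.frontends.stream.api import load_octree

    # One root oct that refines once: the root (1) followed by its
    # eight leaf children (0), in depth-first order.
    octree_mask = np.array([1, 0, 0, 0, 0, 0, 0, 0, 0], dtype="uint8")
    # Per the docstring, data arrays are sized to the number of zeros.
    data = {"Density": np.random.random(8)}
    spf = load_octree(octree_mask, [2, 2, 2], data,
                      sim_unit_to_cm=3.0857e24)
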
diff -r 4f0f69d5b2b78b8d6827ce3480c97350fc529418 -r 5cce4557a832a7457f6057c51003a9d26c010b50 yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -228,3 +228,23 @@
                         ds = self.fields[g.mesh_id][fname]
                     ind += g.select(selector, ds, rv[field], ind) # caches
         return rv
+
+class IOHandlerStreamOctree(BaseIOHandler):
+    _data_style = "stream_octree"
+
+    def __init__(self, stream_handler):
+        self.fields = stream_handler.fields
+        BaseIOHandler.__init__(self)
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        rv = {}
+        ind = 0
+        chunks = list(chunks)
+        assert(len(chunks) == 1)
+        for chunk in chunks:
+            assert(len(chunk.objs) == 1)
+            for subset in chunk.objs:
+                field_vals = self.fields[subset.domain_id -
+                                    subset._domain_offset]
+                subset.fill(field_vals, rv, selector, ind)
+        return rv

diff -r 4f0f69d5b2b78b8d6827ce3480c97350fc529418 -r 5cce4557a832a7457f6057c51003a9d26c010b50 yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -74,7 +74,8 @@
     cdef np.int64_t get_domain_offset(self, int domain_id)
     cdef void visit_all_octs(self, SelectorObject selector,
                         oct_visitor_function *func,
-                        OctVisitorData *data)
+                        OctVisitorData *data,
+                        int vc = ?)
     cdef Oct *next_root(self, int domain_id, int ind[3])
     cdef Oct *next_child(self, int domain_id, int ind[3], Oct *parent)
     cdef void setup_data(self, OctVisitorData *data, int domain_id = ?)

diff -r 4f0f69d5b2b78b8d6827ce3480c97350fc529418 -r 5cce4557a832a7457f6057c51003a9d26c010b50 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -115,6 +115,60 @@
                 for k in range(self.nn[2]):
                     self.root_mesh[i][j][k] = NULL
 
+    @classmethod
+    def load_octree(cls, header):
+        cdef np.ndarray[np.uint8_t, ndim=1] ref_mask
+        ref_mask = header['octree']
+        cdef OctreeContainer obj = cls(header['dims'], header['left_edge'], header['right_edge'])
+        # NOTE: We do not allow domain/file indices to be specified.
+        cdef SelectorObject selector = selection_routines.AlwaysSelector(None)
+        cdef OctVisitorData data
+        obj.setup_data(&data, -1)
+        obj.allocate_domains([ref_mask.shape[0]])
+        cdef int i, j, k, n
+        data.global_index = -1
+        data.level = 0
+        cdef np.float64_t pos[3], dds[3]
+        # This dds is the oct-width
+        for i in range(3):
+            dds[i] = (obj.DRE[i] - obj.DLE[i]) / obj.nn[i]
+        # Pos is the center of the octs
+        cdef OctAllocationContainer *cur = obj.domains[0]
+        cdef Oct *o
+        cdef void *p[4]
+        cdef np.int64_t nfinest = 0
+        p[0] = ref_mask.data
+        p[1] = <void *> cur.my_octs
+        p[2] = <void *> &cur.n_assigned
+        p[3] = <void *> &nfinest
+        data.array = p
+        pos[0] = obj.DLE[0] + dds[0]/2.0
+        for i in range(obj.nn[0]):
+            pos[1] = obj.DLE[1] + dds[1]/2.0
+            for j in range(obj.nn[1]):
+                pos[2] = obj.DLE[2] + dds[2]/2.0
+                for k in range(obj.nn[2]):
+                    if obj.root_mesh[i][j][k] != NULL:
+                        raise RuntimeError
+                    o = &cur.my_octs[cur.n_assigned]
+                    o.domain_ind = o.file_ind = 0
+                    o.domain = 1
+                    obj.root_mesh[i][j][k] = o
+                    cur.n_assigned += 1
+                    data.pos[0] = i
+                    data.pos[1] = j
+                    data.pos[2] = k
+                    selector.recursively_visit_octs(
+                        obj.root_mesh[i][j][k],
+                        pos, dds, 0, oct_visitors.load_octree,
+                        &data, 1)
+                    pos[2] += dds[2]
+                pos[1] += dds[1]
+            pos[0] += dds[0]
+        obj.nocts = cur.n_assigned
+        assert(obj.nocts == ref_mask.size)
+        return obj
+
     cdef void setup_data(self, OctVisitorData *data, int domain_id = -1):
         cdef int i
         data.index = 0
@@ -157,9 +211,11 @@
     @cython.cdivision(True)
     cdef void visit_all_octs(self, SelectorObject selector,
                         oct_visitor_function *func,
-                        OctVisitorData *data):
-        cdef int i, j, k, n, vc
-        vc = self.partial_coverage
+                        OctVisitorData *data,
+                        int vc = -1):
+        cdef int i, j, k, n
+        if vc == -1:
+            vc = self.partial_coverage
         data.global_index = -1
         data.level = 0
         cdef np.float64_t pos[3], dds[3]
@@ -431,6 +487,24 @@
             coords[:,i] += self.DLE[i]
         return coords
 
+    def save_octree(self):
+        # Get the header
+        header = dict(dims = (self.nn[0], self.nn[1], self.nn[2]),
+                      left_edge = (self.DLE[0], self.DLE[1], self.DLE[2]),
+                      right_edge = (self.DRE[0], self.DRE[1], self.DRE[2]))
+        if self.partial_coverage == 1:
+            raise NotImplementedError
+        cdef SelectorObject selector = selection_routines.AlwaysSelector(None)
+        # domain_id = -1 here, because we want *every* oct
+        cdef OctVisitorData data
+        self.setup_data(&data, -1)
+        cdef np.ndarray[np.uint8_t, ndim=1] ref_mask
+        ref_mask = np.zeros(self.nocts, dtype="uint8") - 1
+        data.array = <void *> ref_mask.data
+        self.visit_all_octs(selector, oct_visitors.store_octree, &data, 1)
+        header['octree'] = ref_mask
+        return header
+
     def selector_fill(self, SelectorObject selector,
                       np.ndarray source,
                       np.ndarray dest = None,
@@ -647,19 +721,21 @@
                    np.ndarray[np.uint8_t, ndim=1] levels,
                    np.ndarray[np.uint8_t, ndim=1] cell_inds,
                    np.ndarray[np.int64_t, ndim=1] file_inds,
-                   dest_fields, source_fields):
+                   dest_fields, source_fields,
+                   np.int64_t offset = 0):
         cdef np.ndarray[np.float64_t, ndim=2] source
         cdef np.ndarray[np.float64_t, ndim=1] dest
         cdef int n
         cdef int i, di
-        cdef int local_pos, local_filled
+        cdef np.int64_t local_pos, local_filled = 0
         cdef np.float64_t val
         for key in dest_fields:
             dest = dest_fields[key]
             source = source_fields[key]
             for i in range(levels.shape[0]):
                 if levels[i] != level: continue
-                dest[i] = source[file_inds[i], cell_inds[i]]
+                dest[i + offset] = source[file_inds[i], cell_inds[i]]
+                local_filled += 1
 
     def finalize(self):
         cdef SelectorObject selector = selection_routines.AlwaysSelector(None)
@@ -702,6 +778,13 @@
             self.DRE[i] = domain_right_edge[i] #num_grid
         self.fill_func = oct_visitors.fill_file_indices_rind
 
+    @classmethod
+    def load_octree(self, header):
+        raise NotImplementedError
+
+    def save_octree(self):
+        raise NotImplementedError
+
     cdef int get_root(self, int ind[3], Oct **o):
         o[0] = NULL
         cdef int i
@@ -738,12 +821,14 @@
     @cython.cdivision(True)
     cdef void visit_all_octs(self, SelectorObject selector,
                         oct_visitor_function *func,
-                        OctVisitorData *data):
-        cdef int i, j, k, n, vc
+                        OctVisitorData *data,
+                        int vc = -1):
+        cdef int i, j, k, n
         cdef np.int64_t key, ukey
         data.global_index = -1
         data.level = 0
-        vc = self.partial_coverage
+        if vc == -1:
+            vc = self.partial_coverage
         cdef np.float64_t pos[3], dds[3]
         # This dds is the oct-width
         for i in range(3):

diff -r 4f0f69d5b2b78b8d6827ce3480c97350fc529418 -r 5cce4557a832a7457f6057c51003a9d26c010b50 yt/geometry/oct_visitors.pxd
--- a/yt/geometry/oct_visitors.pxd
+++ b/yt/geometry/oct_visitors.pxd
@@ -38,7 +38,6 @@
                    # To calculate nzones, 1 << (oref * 3)
     np.int32_t nz
                             
-
 ctypedef void oct_visitor_function(Oct *, OctVisitorData *visitor,
                                    np.uint8_t selected)
 
@@ -58,6 +57,8 @@
 cdef oct_visitor_function fill_file_indices_oind
 cdef oct_visitor_function fill_file_indices_rind
 cdef oct_visitor_function count_by_domain
+cdef oct_visitor_function store_octree
+cdef oct_visitor_function load_octree
 
 cdef inline int cind(int i, int j, int k):
     # THIS ONLY WORKS FOR CHILDREN.  It is not general for zones.

diff -r 4f0f69d5b2b78b8d6827ce3480c97350fc529418 -r 5cce4557a832a7457f6057c51003a9d26c010b50 yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -18,6 +18,7 @@
 cimport numpy
 import numpy
 from fp_utils cimport *
+from libc.stdlib cimport malloc, free
 
 # Now some visitor functions
 
@@ -173,3 +174,39 @@
     # NOTE: We do this for every *cell*.
     arr = <np.int64_t *> data.array
     arr[o.domain - 1] += 1
+
+cdef void store_octree(Oct *o, OctVisitorData *data, np.uint8_t selected):
+    cdef np.uint8_t *arr
+    if data.last != o.domain_ind:
+        data.last = o.domain_ind
+        arr = <np.uint8_t *> data.array
+        if o.children == NULL:
+            arr[data.index] = 0
+        if o.children != NULL:
+            arr[data.index] = 1
+        data.index += 1
+
+cdef void load_octree(Oct *o, OctVisitorData *data, np.uint8_t selected):
+    cdef void **p = <void **> data.array
+    cdef np.uint8_t *arr = <np.uint8_t *> p[0]
+    cdef Oct* octs = <Oct*> p[1]
+    cdef np.int64_t *nocts = <np.int64_t*> p[2]
+    cdef np.int64_t *nfinest = <np.int64_t*> p[3]
+    cdef int i
+   
+    if data.last != o.domain_ind:
+        data.last = o.domain_ind
+        if arr[data.index] == 0:
+            o.children = NULL
+            o.file_ind = nfinest[0]
+            o.domain = 1
+            nfinest[0] += 1
+        if arr[data.index] == 1:
+            o.children = <Oct **> malloc(sizeof(Oct *) * 8)
+            for i in range(8):
+                o.children[i] = &octs[nocts[0]]
+                o.children[i].domain_ind = nocts[0]
+                o.children[i].file_ind = -1
+                o.children[i].domain = -1
+                nocts[0] += 1
+        data.index += 1

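Taken together, store_octree and load_octree define a depth-first
serialization of the tree: each oct visited contributes one byte to the mask
(0 for a leaf, 1 for an oct with eight allocated children), and loading
replays the mask in the same visit order, allocating children whenever a 1 is
read. A pure-Python sketch of the encoding, using a hypothetical Node class
in place of the Oct struct:

    class Node(object):
        def __init__(self):
            self.children = None  # None for a leaf, else a list of 8 Nodes

    def store(node, mask):
        # Depth-first: append 1 for a refined oct, 0 for a leaf.
        if node.children is None:
            mask.append(0)
        else:
            mask.append(1)
            for child in node.children:
                store(child, mask)

    def load(bits):
        # bits iterates over the mask in the same depth-first order.
        node = Node()
        if next(bits) == 1:
            node.children = [load(bits) for _ in range(8)]
        return node

In the Cython version the visit order is fixed by visit_all_octs rather than
by recursion over children, but the invariant is the same: store and load
must consume the mask in identical order.
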
diff -r 4f0f69d5b2b78b8d6827ce3480c97350fc529418 -r 5cce4557a832a7457f6057c51003a9d26c010b50 yt/geometry/tests/test_particle_octree.py
--- a/yt/geometry/tests/test_particle_octree.py
+++ b/yt/geometry/tests/test_particle_octree.py
@@ -1,12 +1,14 @@
 from yt.testing import *
 import numpy as np
+from yt.geometry.oct_container import \
+    OctreeContainer
 from yt.geometry.particle_oct_container import \
     ParticleOctreeContainer, \
     ParticleRegions
 from yt.geometry.oct_container import _ORDER_MAX
 from yt.utilities.lib.geometry_utils import get_morton_indices
 from yt.frontends.stream.api import load_particles
-from yt.geometry.selection_routines import RegionSelector
+from yt.geometry.selection_routines import RegionSelector, AlwaysSelector
 import yt.data_objects.api
 import time, os
 
@@ -42,6 +44,34 @@
         #    level_count += octree.count_levels(total_count.size-1, dom, mask)
         yield assert_equal, total_count, [1, 8, 64, 64, 256, 536, 1856, 1672]
 
+def test_save_load_octree():
+    np.random.seed(int(0x4d3d3d3))
+    pos = np.random.normal(0.5, scale=0.05, size=(NPART,3)) * (DRE-DLE) + DLE
+    octree = ParticleOctreeContainer((1, 1, 1), DLE, DRE)
+    octree.n_ref = 32
+    for i in range(3):
+        np.clip(pos[:,i], DLE[i], DRE[i], pos[:,i])
+    # Convert to integers
+    pos = np.floor((pos - DLE)/dx).astype("uint64")
+    morton = get_morton_indices(pos)
+    morton.sort()
+    octree.add(morton)
+    octree.finalize()
+    saved = octree.save_octree()
+    loaded = OctreeContainer.load_octree(saved)
+    always = AlwaysSelector(None)
+    ir1 = octree.ires(always)
+    ir2 = loaded.ires(always)
+    yield assert_equal, ir1, ir2
+
+    fc1 = octree.fcoords(always)
+    fc2 = loaded.fcoords(always)
+    yield assert_equal, fc1, fc2
+
+    fw1 = octree.fwidth(always)
+    fw2 = loaded.fwidth(always)
+    yield assert_equal, fw1, fw2
+
 def test_particle_octree_counts():
     np.random.seed(int(0x4d3d3d3))
     # Eight times as many!

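The round-trip test above doubles as usage documentation for the new API:
serialize a finalized octree with save_octree(), rebuild it with the
OctreeContainer.load_octree() classmethod, and the reconstruction must agree
on ires, fcoords and fwidth for every cell an AlwaysSelector touches:

    saved = octree.save_octree()
    loaded = OctreeContainer.load_octree(saved)
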
diff -r 4f0f69d5b2b78b8d6827ce3480c97350fc529418 -r 5cce4557a832a7457f6057c51003a9d26c010b50 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -114,7 +114,7 @@
 from yt.frontends.stream.api import \
     StreamStaticOutput, StreamFieldInfo, add_stream_field, \
     StreamHandler, load_uniform_grid, load_amr_grids, \
-    load_particles, load_hexahedral_mesh
+    load_particles, load_hexahedral_mesh, load_octree
 
 from yt.frontends.sph.api import \
     OWLSStaticOutput, OWLSFieldInfo, add_owls_field, \


https://bitbucket.org/yt_analysis/yt-3.0/commits/8c5074fceef2/
Changeset:   8c5074fceef2
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-03 21:07:43
Summary:     Merging.
Affected #:  11 files

diff -r 5cce4557a832a7457f6057c51003a9d26c010b50 -r 8c5074fceef2d66419bf09758a5f871a0a17ecb2 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -198,9 +198,6 @@
         mask[grid_ind] = True
         return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
 
-    def _setup_data_io(self):
-        self.io = io_registry[self.data_style](self.parameter_file)
-
 class ChomboStaticOutput(StaticOutput):
     _hierarchy_class = ChomboHierarchy
     _fieldinfo_fallback = ChomboFieldInfo

diff -r 5cce4557a832a7457f6057c51003a9d26c010b50 -r 8c5074fceef2d66419bf09758a5f871a0a17ecb2 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -572,10 +572,6 @@
                     result[p] = result[p][0:max_num]
         return result
 
-    def _setup_data_io(self):
-            self.io = io_registry[self.data_style](self.parameter_file)
-
-
 class EnzoHierarchyInMemory(EnzoHierarchy):
 
     grid = EnzoGridInMemory

diff -r 5cce4557a832a7457f6057c51003a9d26c010b50 -r 8c5074fceef2d66419bf09758a5f871a0a17ecb2 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -199,9 +199,6 @@
                 # Translating an already-converted field
                 self.parameter_file.conversion_factors[field] = 1.0 
                 
-    def _setup_data_io(self):
-        self.io = io_registry[self.data_style](self.parameter_file)
-
 class FLASHStaticOutput(StaticOutput):
     _hierarchy_class = FLASHHierarchy
     _fieldinfo_fallback = FLASHFieldInfo

diff -r 5cce4557a832a7457f6057c51003a9d26c010b50 -r 8c5074fceef2d66419bf09758a5f871a0a17ecb2 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -170,9 +170,6 @@
         mask[grid_ind] = True
         return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
 
-    def _setup_data_io(self):
-        self.io = io_registry[self.data_style](self.parameter_file)
-
 class GDFStaticOutput(StaticOutput):
     _hierarchy_class = GDFHierarchy
     _fieldinfo_fallback = GDFFieldInfo

diff -r 5cce4557a832a7457f6057c51003a9d26c010b50 -r 8c5074fceef2d66419bf09758a5f871a0a17ecb2 yt/frontends/moab/data_structures.py
--- a/yt/frontends/moab/data_structures.py
+++ b/yt/frontends/moab/data_structures.py
@@ -68,9 +68,6 @@
     def _count_grids(self):
         self.num_grids = 1 #self._fhandle['/grid_parent_id'].shape[0]
 
-    def _setup_data_io(self):
-        self.io = io_registry[self.data_style](self.parameter_file)
-
 class MoabHex8StaticOutput(StaticOutput):
     _hierarchy_class = MoabHex8Hierarchy
     _fieldinfo_fallback = MoabFieldInfo

diff -r 5cce4557a832a7457f6057c51003a9d26c010b50 -r 8c5074fceef2d66419bf09758a5f871a0a17ecb2 yt/frontends/pluto/data_structures.py
--- a/yt/frontends/pluto/data_structures.py
+++ b/yt/frontends/pluto/data_structures.py
@@ -168,9 +168,6 @@
         mask[grid_ind] = True
         return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
 
-    def _setup_data_io(self):
-        self.io = io_registry[self.data_style](self.parameter_file)
-
 class PlutoStaticOutput(StaticOutput):
     _hierarchy_class = PlutoHierarchy
     _fieldinfo_fallback = PlutoFieldInfo

diff -r 5cce4557a832a7457f6057c51003a9d26c010b50 -r 8c5074fceef2d66419bf09758a5f871a0a17ecb2 yt/frontends/sph/definitions.py
--- a/yt/frontends/sph/definitions.py
+++ b/yt/frontends/sph/definitions.py
@@ -0,0 +1,5 @@
+
+gadget_ptypes = ("Gas", "Halo", "Disk", "Bulge", "Stars", "Bndry")
+ghdf5_ptypes  = ("PartType0", "PartType1", "PartType2", "PartType3",
+                 "PartType4", "PartType5")
+

diff -r 5cce4557a832a7457f6057c51003a9d26c010b50 -r 8c5074fceef2d66419bf09758a5f871a0a17ecb2 yt/frontends/sph/fields.py
--- a/yt/frontends/sph/fields.py
+++ b/yt/frontends/sph/fields.py
@@ -28,6 +28,9 @@
     NullFunc, \
     TranslationFunc
 import yt.data_objects.universal_fields
+from .definitions import \
+    gadget_ptypes, \
+    ghdf5_ptypes
 from yt.data_objects.particle_fields import \
     particle_deposition_functions, \
     particle_scalar_functions, \
@@ -105,24 +108,9 @@
 
     # This has to be done manually for Gadget, because some of the particles will
     # have uniform mass
-    def _gadget_particle_fields(ptype):
-        def _Mass(field, data):
-            pind = ptypes.index(ptype)
-            if data.pf["Massarr"][pind] == 0.0:
-                return data[ptype, "Masses"].copy()
-            mass = np.ones(data[ptype, "ParticleIDs"].shape[0], dtype="float64")
-            # Note that this is an alias, which is why we need to apply conversion
-            # here.  Otherwise we'd have an asymmetry.
-            mass *= data.pf["Massarr"][pind] * data.convert("mass")
-            return mass
-        field_registry.add_field((ptype, "Mass"), function=_Mass,
-                                  particle_type = True)
-
     for fname in ["Coordinates", "Velocities", "ParticleIDs",
-                  # Note: Mass, not Masses
-                  "Mass", "particle_index"]:
-        func = _field_concat(fname)
-        field_registry.add_field(("all", fname), function=func,
+                  "Masses", "Mass", "particle_index"]:
+        field_registry.add_field(("all", fname), function=NullFunc,
                 particle_type = True)
 
     for ptype in ptypes:
@@ -130,12 +118,11 @@
             particle_type = True,
             convert_function=_get_conv("mass"),
             units = r"\mathrm{g}")
-        _gadget_particle_fields(ptype)
         known_registry.add_field((ptype, "Velocities"), function=NullFunc,
             particle_type = True,
             convert_function=_get_conv("velocity"),
             units = r"\mathrm{cm}/\mathrm{s}")
-        particle_deposition_functions(ptype, "Coordinates", "Mass", field_registry)
+        particle_deposition_functions(ptype, "Coordinates", "Masses", field_registry)
         particle_scalar_functions(ptype, "Coordinates", "Velocities", field_registry)
         known_registry.add_field((ptype, "Coordinates"), function=NullFunc,
             particle_type = True)
@@ -143,7 +130,7 @@
         field_registry.add_field( (ptype, "particle_index"),
             function = TranslationFunc((ptype, "ParticleIDs")),
             particle_type = True)
-    particle_deposition_functions("all", "Coordinates", "Mass", field_registry)
+    particle_deposition_functions("all", "Coordinates", "Masses", field_registry)
 
     # Now we have to manually apply the splits for "all", since we don't want to
     # use the splits defined above.
@@ -156,14 +143,10 @@
                     particle_type = True)
 
 # Note that we call the same function a few times here.
-_gadget_ptypes = ("Gas", "Halo", "Disk", "Bulge", "Stars", "Bndry")
-_ghdf5_ptypes  = ("PartType0", "PartType1", "PartType2", "PartType3",
-                  "PartType4", "PartType5")
-
-_setup_gadget_fields(_gadget_ptypes,
+_setup_gadget_fields(gadget_ptypes,
     GadgetFieldInfo,
     KnownGadgetFields)
-_setup_gadget_fields(_ghdf5_ptypes,
+_setup_gadget_fields(ghdf5_ptypes,
     GadgetHDF5FieldInfo,
     KnownGadgetHDF5Fields)
 

diff -r 5cce4557a832a7457f6057c51003a9d26c010b50 -r 8c5074fceef2d66419bf09758a5f871a0a17ecb2 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -16,6 +16,7 @@
 
 import h5py
 import numpy as np
+from .definitions import gadget_ptypes, ghdf5_ptypes
 from yt.funcs import *
 from yt.utilities.exceptions import *
 
@@ -29,8 +30,6 @@
 
 CHUNKSIZE = 10000000
 
-_vector_fields = ("Coordinates", "Velocity", "Velocities")
-
 def _get_h5_handle(fn):
     try:
         f = h5py.File(fn, "r")
@@ -45,23 +44,27 @@
 
 class IOHandlerOWLS(BaseIOHandler):
     _data_style = "OWLS"
+    _vector_fields = ("Coordinates", "Velocity", "Velocities")
+    _known_ptypes = ghdf5_ptypes
+    _var_mass = None
+
+    @property
+    def var_mass(self):
+        if self._var_mass is None:
+            vm = []
+            for i, v in enumerate(self.pf["Massarr"]):
+                if v == 0:
+                    vm.append(self._known_ptypes[i])
+            self._var_mass = tuple(vm)
+        return self._var_mass
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
         raise NotImplementedError
 
-    def _read_particle_selection(self, chunks, selector, fields):
+    def _read_particle_coords(self, chunks, ptf):
+        # This will read chunks and yield the results.
         rv = {}
-        # We first need a set of masks for each particle type
-        ptf = defaultdict(list)
-        psize = defaultdict(lambda: 0)
         chunks = list(chunks)
-        for ftype, fname in fields:
-            ptf[ftype].append(fname)
-        # For this type of file, we actually have something slightly different.
-        # We are given a list of ParticleDataChunks, which is composed of
-        # individual ParticleOctreeSubsets.  The data_files attribute on these
-        # may in fact overlap.  So we will iterate over a union of all the
-        # data_files.
         data_files = set([])
         for chunk in chunks:
             for obj in chunk.objs:
@@ -70,21 +73,18 @@
             f = _get_h5_handle(data_file.filename)
             # This double-reads: coordinates are read here and again in _read_particle_fields.
             for ptype, field_list in sorted(ptf.items()):
-                coords = f["/%s/Coordinates" % ptype][:].astype("float64")
-                psize[ptype] += selector.count_points(
-                    coords[:,0], coords[:,1], coords[:,2])
-                del coords
+                x = f["/%s/Coordinates" % ptype][:,0].astype("float64")
+                y = f["/%s/Coordinates" % ptype][:,1].astype("float64")
+                z = f["/%s/Coordinates" % ptype][:,2].astype("float64")
+                yield ptype, (x, y, z)
             f.close()
+
+    def _read_particle_fields(self, chunks, ptf, selector):
         # Now we have all the sizes, and we can allocate
-        ind = {}
-        for field in fields:
-            mylog.debug("Allocating %s values for %s", psize[field[0]], field)
-            if field[1] in _vector_fields:
-                shape = (psize[field[0]], 3)
-            else:
-                shape = psize[field[0]]
-            rv[field] = np.empty(shape, dtype="float64")
-            ind[field] = 0
+        data_files = set([])
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
         for data_file in data_files:
             f = _get_h5_handle(data_file.filename)
             for ptype, field_list in sorted(ptf.items()):
@@ -95,14 +95,14 @@
                 del coords
                 if mask is None: continue
                 for field in field_list:
-                    data = g[field][:][mask,...]
-                    my_ind = ind[ptype, field]
-                    mylog.debug("Filling from %s to %s with %s",
-                        my_ind, my_ind+data.shape[0], field)
-                    rv[ptype, field][my_ind:my_ind + data.shape[0],...] = data
-                    ind[ptype, field] += data.shape[0]
+                    if field == "Masses" and ptype not in self.var_mass:
+                        data = np.empty(mask.sum(), dtype="float64")
+                        ind = self._known_ptypes.index(ptype) 
+                        data[:] = self.pf["Massarr"][ind]
+                    else:
+                        data = g[field][:][mask,...]
+                    yield (ptype, field), data
             f.close()
-        return rv
 
     def _initialize_index(self, data_file, regions):
         f = _get_h5_handle(data_file.filename)
@@ -145,6 +145,9 @@
                 if not hasattr(g[k], "shape"): continue
                 # str => not unicode!
                 fields.append((ptype, str(k)))
+            if "Masses" not in g.keys():
+                # We'll append it anyway.
+                fields.append((ptype, "Masses"))
         f.close()
         return fields
 
@@ -155,6 +158,7 @@
 
 class IOHandlerGadgetBinary(BaseIOHandler):
     _data_style = "gadget_binary"
+    _vector_fields = ("Coordinates", "Velocity", "Velocities")
 
     # Particle types (Table 3 in GADGET-2 user guide)
     _ptypes = ( "Gas",
@@ -337,6 +341,7 @@
 
 class IOHandlerTipsyBinary(BaseIOHandler):
     _data_style = "tipsy"
+    _vector_fields = ("Coordinates", "Velocity", "Velocities")
 
     _pdtypes = None # dtypes, to be filled in later
 

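The OWLS handler is refactored here into two generator passes:
_read_particle_coords yields (ptype, (x, y, z)) tuples so a caller can count
selected points and size its buffers, and _read_particle_fields yields
((ptype, field), data) pairs for the actual fill. The second pass also
synthesizes a uniform Masses array from the header's Massarr table for
particle types whose masses are not stored per particle. A simplified sketch
of that fallback, with massarr, known_ptypes and var_mass standing in for the
handler attributes:

    import numpy as np

    def read_masses(ptype, mask, g, massarr, known_ptypes, var_mass):
        # var_mass lists the ptypes whose Massarr entry is 0, i.e. whose
        # masses really are stored per particle in the file.
        if ptype in var_mass:
            return g["Masses"][:][mask, ...]
        data = np.empty(mask.sum(), dtype="float64")
        data[:] = massarr[known_ptypes.index(ptype)]
        return data
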
diff -r 5cce4557a832a7457f6057c51003a9d26c010b50 -r 8c5074fceef2d66419bf09758a5f871a0a17ecb2 yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -329,7 +329,7 @@
 
     def _setup_data_io(self):
         if getattr(self, "io", None) is not None: return
-        self.io = io_registry[self.data_style]()
+        self.io = io_registry[self.data_style](self.parameter_file)
 
     def _save_data(self, array, node, name, set_attr=None, force=False, passthrough = False):
         """

diff -r 5cce4557a832a7457f6057c51003a9d26c010b50 -r 8c5074fceef2d66419bf09758a5f871a0a17ecb2 yt/utilities/io_handler.py
--- a/yt/utilities/io_handler.py
+++ b/yt/utilities/io_handler.py
@@ -16,17 +16,19 @@
 from collections import defaultdict
 
 import yt.utilities.lib as au
+from yt.funcs import mylog
 import exceptions
 import cPickle
 import os
 import h5py
+import numpy as np
 
 _axis_ids = {0:2,1:1,2:0}
 
 io_registry = {}
 
 class BaseIOHandler(object):
-
+    _vector_fields = ()
     _data_style = None
     _particle_reader = False
 
@@ -36,8 +38,9 @@
             if hasattr(cls, "_data_style"):
                 io_registry[cls._data_style] = cls
 
-    def __init__(self):
+    def __init__(self, pf):
         self.queue = defaultdict(dict)
+        self.pf = pf
 
     # We need a function for reading a list of sets
     # and a function for *popping* from a queue all the appropriate sets
@@ -111,6 +114,62 @@
     def _read_chunk_data(self, chunk, fields):
         return None
 
+    def _read_particle_selection(self, chunks, selector, fields):
+        rv = {}
+        ind = {}
+        # We first need a set of masks for each particle type
+        ptf = defaultdict(list)        # ON-DISK TO READ
+        psize = defaultdict(lambda: 0) # COUNT PTYPES ON DISK
+        fsize = defaultdict(lambda: 0) # COUNT RV
+        field_maps = defaultdict(list) # ptypes -> fields
+        chunks = list(chunks)
+        # What we need is a mapping from particle types to return types
+        ondisk_ptypes = [pt for pt in self.pf.particle_types
+                         if pt not in self.pf.known_filters
+                         and pt != "all"]
+        for field in fields:
+            ftype, fname = field
+            fsize[field] = 0
+            # We should add a check for pf.particle_unions or something here
+            if ftype == "all":
+                for pt in ondisk_ptypes:
+                    ptf[pt].append(fname)
+                    field_maps[pt, fname].append(field)
+            else:
+                ptf[ftype].append(fname)
+                field_maps[field].append(field)
+        # Now we have our full listing.
+        # Here, ptype_map means which particles contribute to a given type.
+        # And ptf is the actual fields from disk to read.
+        for ptype, (x, y, z) in self._read_particle_coords(chunks, ptf):
+            psize[ptype] += selector.count_points(x, y, z)
+        # Now we allocate
+        # ptf, remember, is our mapping of what we want to read
+        #for ptype in ptf:
+        for field in fields:
+            if field[0] == "all":
+                for pt in ondisk_ptypes:
+                    fsize[field] += psize[pt]
+            else:
+                fsize[field] += psize[field[0]]
+        for field in fields:
+            if field[1] in self._vector_fields:
+                shape = (fsize[field], 3)
+            else:
+                shape = (fsize[field], )
+            rv[field] = np.empty(shape, dtype="float64")
+            ind[field] = 0
+        # Now we read.
+        for field_r, vals in self._read_particle_fields(chunks, ptf, selector):
+            # Note that we now need to check the mappings
+            for field_f in field_maps[field_r]:
+                my_ind = ind[field_f]
+                mylog.debug("Filling %s from %s to %s with %s",
+                    field_f, my_ind, my_ind+vals.shape[0], field_r)
+                rv[field_f][my_ind:my_ind + vals.shape[0],...] = vals
+                ind[field_f] += vals.shape[0]
+        return rv
+
 class IOHandlerExtracted(BaseIOHandler):
 
     _data_style = 'extracted'

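With those generators in place, _read_particle_selection becomes a generic
two-pass driver in BaseIOHandler: pass one streams coordinates through the
selector to count surviving particles per type, buffers are then allocated
(vector fields get a trailing axis of 3), and pass two streams field values
into the correct slice of each buffer, with field_maps letting a single
on-disk read satisfy several requested fields (the "all" fan-out). A condensed
sketch of the control flow, omitting the fan-out and taking the two frontend
generators as plain callables:

    import numpy as np
    from collections import defaultdict

    def read_selection(fields, selector, read_coords, read_fields,
                       vector_fields=("Coordinates", "Velocities")):
        # Pass 1: count selected particles per type.
        psize = defaultdict(int)
        for ptype, (x, y, z) in read_coords():
            psize[ptype] += selector.count_points(x, y, z)
        # Allocate one float64 buffer per requested field.
        rv, ind = {}, {}
        for field in fields:
            n = psize[field[0]]
            shape = (n, 3) if field[1] in vector_fields else (n,)
            rv[field] = np.empty(shape, dtype="float64")
            ind[field] = 0
        # Pass 2: fill each buffer slice by slice.
        for field, vals in read_fields(selector):
            rv[field][ind[field]:ind[field] + vals.shape[0], ...] = vals
            ind[field] += vals.shape[0]
        return rv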

https://bitbucket.org/yt_analysis/yt-3.0/commits/883ee57e0561/
Changeset:   883ee57e0561
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-03 22:38:19
Summary:     Initial draft of particle unions.
Affected #:  13 files

diff -r 8c5074fceef2d66419bf09758a5f871a0a17ecb2 -r 883ee57e056108f87dfa36a7cef8a483f65de4ca yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -264,7 +264,7 @@
             if not isinstance(item, tuple):
                 field = ("unknown", item)
                 finfo = self.pf._get_field_info(*field)
-                mylog.debug("Guessing field %s is %s", item, finfo.name)
+                #mylog.debug("Guessing field %s is %s", item, finfo.name)
             else:
                 field = item
             finfo = self.pf._get_field_info(*field)

diff -r 8c5074fceef2d66419bf09758a5f871a0a17ecb2 -r 883ee57e056108f87dfa36a7cef8a483f65de4ca yt/data_objects/particle_unions.py
--- /dev/null
+++ b/yt/data_objects/particle_unions.py
@@ -0,0 +1,28 @@
+"""
+These are particle union objects.  These essentially alias one particle to
+another, where the other can be one or several particle types.
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+from yt.funcs import ensure_list
+
+class ParticleUnion(object):
+    def __init__(self, name, sub_types):
+        self.name = name
+        self.sub_types = ensure_list(sub_types)
+
+    def __iter__(self):
+        for st in self.sub_types:
+            yield st

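A ParticleUnion is deliberately minimal: a name plus the concrete sub-types
it aliases, iterable so consumers can expand it. Presumably the intended use,
with illustrative type names:

    from yt.data_objects.particle_unions import ParticleUnion

    # "all" becomes just another union over the on-disk types.
    union = ParticleUnion("all", ["PartType0", "PartType1"])
    for ptype in union:
        print(ptype)  # PartType0, then PartType1
    # pf.add_particle_union(union) then registers ("all", field)
    # aliases for every field the sub-types have in common.
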
diff -r 8c5074fceef2d66419bf09758a5f871a0a17ecb2 -r 883ee57e056108f87dfa36a7cef8a483f65de4ca yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -48,7 +48,7 @@
 
     default_fluid_type = "gas"
     fluid_types = ("gas","deposit")
-    particle_types = ("all",)
+    particle_types = None
     geometry = "cartesian"
     coordinates = None
     max_level = 99
@@ -92,6 +92,7 @@
         self.conversion_factors = {}
         self.parameters = {}
         self.known_filters = {}
+        self.particle_unions = {}
 
         # path stuff
         self.parameter_filename = str(filename)
@@ -314,6 +315,26 @@
             return self._last_finfo
         raise YTFieldNotFound((ftype, fname), self)
 
+    def add_particle_union(self, union):
+        # No string lookups here, we need an actual union.
+        f = self.particle_fields_by_type
+        fields = set_intersection([f[s] for s in union
+                                   if s in self.particle_types_raw])
+        self.particle_types += (union.name,)
+        self.particle_unions[union.name] = union
+        fields = [ (union.name, field) for field in fields]
+        self.h.field_list.extend(fields)
+        self.h._setup_unknown_fields(fields)
+        self.h._setup_particle_fields(union.name)
+
+    @property
+    def particle_fields_by_type(self):
+        fields = defaultdict(list)
+        for field in self.h.field_list:
+            if field[0] in self.particle_types_raw:
+                fields[field[0]].append(field[1])
+        return fields
+
     @property
     def ires_factor(self):
         o2 = np.log2(self.refine_by)

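add_particle_union only exposes fields shared by every member type:
particle_fields_by_type inverts the hierarchy's field_list into a
ptype -> [field names] mapping, and set_intersection (added to yt.funcs later
in this changeset) reduces those lists to the common subset before the
aliases are registered. So, for a loaded dataset pf, if one sub-type lacks
Metallicity the union will not advertise it:

    f = pf.particle_fields_by_type
    # e.g. f == {"PartType0": ["Coordinates", "Masses", "Metallicity"],
    #            "PartType1": ["Coordinates", "Masses"]}
    # set_intersection([f[s] for s in union])
    #   -> set(["Coordinates", "Masses"])
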
diff -r 8c5074fceef2d66419bf09758a5f871a0a17ecb2 -r 883ee57e056108f87dfa36a7cef8a483f65de4ca yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -118,7 +118,7 @@
         if "wspecies" in self.parameter_file.parameters.keys():
             wspecies = self.parameter_file.parameters['wspecies']
             nspecies = len(wspecies)
-            self.parameter_file.particle_types = ["all", "darkmatter", "stars"]
+            self.parameter_file.particle_types = ["darkmatter", "stars"]
             for specie in range(nspecies):
                 self.parameter_file.particle_types.append("specie%i" % specie)
         else:

diff -r 8c5074fceef2d66419bf09758a5f871a0a17ecb2 -r 883ee57e056108f87dfa36a7cef8a483f65de4ca yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -461,7 +461,7 @@
                             "species_%02d_secondary_variable_labels"
                             % (species, )])
 
-            self.particle_types = ("all",) + tuple(
+            self.particle_types = tuple(
                 set(art_to_yt[s] for s in
                     self.artio_parameters["particle_species_labels"]))
             self.particle_types = tuple(self.particle_types)

diff -r 8c5074fceef2d66419bf09758a5f871a0a17ecb2 -r 883ee57e056108f87dfa36a7cef8a483f65de4ca yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -892,7 +892,7 @@
         else:
             self.current_redshift = self.omega_lambda = self.omega_matter = \
                 self.hubble_constant = self.cosmological_simulation = 0.0
-        self.particle_types = ["all"]
+        self.particle_types = ["enzo"]
         for ptype in self.parameters.get("AppendActiveParticleType", []):
             self.particle_types.append(ptype)
         if self.parameters["NumberOfParticles"] > 0 and \

diff -r 8c5074fceef2d66419bf09758a5f871a0a17ecb2 -r 883ee57e056108f87dfa36a7cef8a483f65de4ca yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -36,8 +36,6 @@
     mass_sun_cgs
 from yt.utilities.cosmology import Cosmology
 from .fields import \
-    OWLSFieldInfo, \
-    KnownOWLSFields, \
     GadgetFieldInfo, \
     KnownGadgetFields, \
     GadgetHDF5FieldInfo, \
@@ -267,31 +265,53 @@
         # We do not allow load() of these files.
         return False
 
+class GadgetHDF5StaticOutput(GadgetStaticOutput):
+    _file_class = ParticleFile
+    _fieldinfo_fallback = GadgetHDF5FieldInfo
+    _fieldinfo_known = KnownGadgetHDF5Fields
+    _suffix = ".hdf5"
 
-class OWLSStaticOutput(GadgetStaticOutput):
-    _hierarchy_class = ParticleGeometryHandler
-    _file_class = ParticleFile
-    _fieldinfo_fallback = OWLSFieldInfo  # For now we have separate from Gadget
-    _fieldinfo_known = KnownOWLSFields
-    _particle_mass_name = "Mass"
-    _particle_coordinates_name = "Coordinates"
-    _header_spec = None  # Override so that there's no confusion
-
-    def __init__(self, filename, data_style="OWLS", n_ref=64,
-                 over_refine_factor=1):
+    def __init__(self, filename, data_style="gadget_hdf5", 
+                 unit_base = None, n_ref=64,
+                 over_refine_factor=1,
+                 bounding_box = None):
         self.storage_filename = None
         filename = os.path.abspath(filename)
-        super(OWLSStaticOutput, self).__init__(
-            filename, data_style, unit_base=None, n_ref=n_ref,
-            over_refine_factor=over_refine_factor)
+        super(GadgetHDF5StaticOutput, self).__init__(
+            filename, data_style, unit_base=unit_base, n_ref=n_ref,
+            over_refine_factor=over_refine_factor,
+            bounding_box = bounding_box)
 
-    def __repr__(self):
-        return os.path.basename(self.parameter_filename).split(".")[0]
+    def _get_hvals(self):
+        handle = h5py.File(self.parameter_filename, mode="r")
+        hvals = {}
+        hvals.update((str(k), v) for k, v in handle["/Header"].attrs.items())
+        # Compat reasons.
+        hvals["NumFiles"] = hvals["NumFilesPerSnapshot"]
+        hvals["Massarr"] = hvals["MassTable"]
+        return hvals
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        try:
+            fileh = h5py.File(args[0], mode='r')
+            if "Constants" not in fileh["/"].keys() and \
+               "Header" in fileh["/"].keys():
+                fileh.close()
+                return True
+            fileh.close()
+        except:
+            pass
+        return False
+
+class OWLSStaticOutput(GadgetHDF5StaticOutput):
 
     def _parse_parameter_file(self):
         handle = h5py.File(self.parameter_filename, mode="r")
         hvals = {}
         hvals.update((str(k), v) for k, v in handle["/Header"].attrs.items())
+        hvals["NumFiles"] = hvals["NumFilesPerSnapshot"]
+        hvals["Massarr"] = hvals["MassTable"]
 
         self.dimensionality = 3
         self.refine_by = 2
@@ -340,45 +360,6 @@
             pass
         return False
 
-class GadgetHDF5StaticOutput(GadgetStaticOutput):
-    _file_class = ParticleFile
-    _fieldinfo_fallback = GadgetHDF5FieldInfo
-    _fieldinfo_known = KnownGadgetHDF5Fields
-    _suffix = ".hdf5"
-
-    def __init__(self, filename, data_style="gadget_hdf5", 
-                 unit_base = None, n_ref=64,
-                 over_refine_factor=1,
-                 bounding_box = None):
-        self.storage_filename = None
-        filename = os.path.abspath(filename)
-        super(GadgetHDF5StaticOutput, self).__init__(
-            filename, data_style, unit_base=unit_base, n_ref=n_ref,
-            over_refine_factor=over_refine_factor,
-            bounding_box = bounding_box)
-
-    def _get_hvals(self):
-        handle = h5py.File(self.parameter_filename, mode="r")
-        hvals = {}
-        hvals.update((str(k), v) for k, v in handle["/Header"].attrs.items())
-        # Compat reasons.
-        hvals["NumFiles"] = hvals["NumFilesPerSnapshot"]
-        hvals["Massarr"] = hvals["MassTable"]
-        return hvals
-
-    @classmethod
-    def _is_valid(self, *args, **kwargs):
-        try:
-            fileh = h5py.File(args[0], mode='r')
-            if "Constants" not in fileh["/"].keys() and \
-               "Header" in fileh["/"].keys():
-                fileh.close()
-                return True
-            fileh.close()
-        except:
-            pass
-        return False
-
 class TipsyFile(ParticleFile):
 
     def _calculate_offsets(self, field_list):

diff -r 8c5074fceef2d66419bf09758a5f871a0a17ecb2 -r 883ee57e056108f87dfa36a7cef8a483f65de4ca yt/frontends/sph/fields.py
--- a/yt/frontends/sph/fields.py
+++ b/yt/frontends/sph/fields.py
@@ -88,17 +88,9 @@
 
 for fname in ["Coordinates", "Velocities", "ParticleIDs", "Mass",
               "Epsilon", "Phi"]:
-    func = _field_concat(fname)
-    TipsyFieldInfo.add_field(("all", fname), function=func,
+    TipsyFieldInfo.add_field(("all", fname), function=NullFunc,
             particle_type = True)
 
-for iname, oname in [("Coordinates", "particle_position_"),
-                     ("Velocities", "particle_velocity_")]:
-    for axi, ax in enumerate("xyz"):
-        func = _field_concat_slice(iname, axi)
-        TipsyFieldInfo.add_field(("all", oname + ax), function=func,
-                particle_type = True)
-
 # GADGET
 # ======
 
@@ -135,13 +127,6 @@
     # Now we have to manually apply the splits for "all", since we don't want to
     # use the splits defined above.
 
-    for iname, oname in [("Coordinates", "particle_position_"),
-                         ("Velocities", "particle_velocity_")]:
-        for axi, ax in enumerate("xyz"):
-            func = _field_concat_slice(iname, axi)
-            field_registry.add_field(("all", oname + ax), function=func,
-                    particle_type = True)
-
 # Note that we call the same function a few times here.
 _setup_gadget_fields(gadget_ptypes,
     GadgetFieldInfo,
@@ -150,69 +135,6 @@
     GadgetHDF5FieldInfo,
     KnownGadgetHDF5Fields)
 
-
-# OWLS
-# ====
-
-# I am optimistic that some day we will be able to get rid of much of this, and
-# make OWLS a subclass of Gadget fields.
-
-_owls_ptypes = ("PartType0", "PartType1", "PartType2", "PartType3",
-                "PartType4", "PartType5")
-
-for fname in ["Coordinates", "Velocity", "ParticleIDs",
-              # Note: Mass, not Masses
-              "Mass", "particle_index"]:
-    func = _field_concat(fname)
-    OWLSFieldInfo.add_field(("all", fname), function=func,
-            particle_type = True)
-
-def _owls_particle_fields(ptype):
-    def _Mass(field, data):
-        pind = _owls_ptypes.index(ptype)
-        if data.pf["MassTable"][pind] == 0.0:
-            raise RuntimeError
-        mass = np.ones(data[ptype, "ParticleIDs"].shape[0], dtype="float64")
-        # Note that this is an alias, which is why we need to apply conversion
-        # here.  Otherwise we'd have an asymmetry.
-        mass *= data.pf["MassTable"][pind] 
-        return mass
-    OWLSFieldInfo.add_field((ptype, "Mass"), function=_Mass,
-                            convert_function = _get_conv("mass"),
-                            particle_type = True)
-
-for ptype in _owls_ptypes:
-    # Note that this adds a "Known" Mass field and a "Derived" Mass field.
-    # This way the "Known" will get used, and if it's not there, it will use
-    # the derived.
-    KnownOWLSFields.add_field((ptype, "Mass"), function=NullFunc,
-        particle_type = True,
-        convert_function=_get_conv("mass"),
-        units = r"\mathrm{g}")
-    _owls_particle_fields(ptype)
-    KnownOWLSFields.add_field((ptype, "Velocity"), function=NullFunc,
-        particle_type = True,
-        convert_function=_get_conv("velocity"),
-        units = r"\mathrm{cm}/\mathrm{s}")
-    particle_deposition_functions(ptype, "Coordinates", "Mass", OWLSFieldInfo)
-    particle_scalar_functions(ptype, "Coordinates", "Velocity", OWLSFieldInfo)
-    KnownOWLSFields.add_field((ptype, "Coordinates"), function=NullFunc,
-        particle_type = True)
-    OWLSFieldInfo.add_field( (ptype, "particle_index"),
-        function = TranslationFunc((ptype, "ParticleIDs")),
-        particle_type = True)
-particle_deposition_functions("all", "Coordinates", "Mass", OWLSFieldInfo)
-
-# Now we have to manually apply the splits for "all", since we don't want to
-# use the splits defined above.
-
-for iname, oname in [("Coordinates", "particle_position_"),
-                     ("Velocity", "particle_velocity_")]:
-    for axi, ax in enumerate("xyz"):
-        func = _field_concat_slice(iname, axi)
-        OWLSFieldInfo.add_field(("all", oname + ax), function=func,
-                particle_type = True)
-
 def SmoothedGas(field, data):
     pos = data["PartType0", "Coordinates"]
     sml = data["PartType0", "SmoothingLength"]

diff -r 8c5074fceef2d66419bf09758a5f871a0a17ecb2 -r 883ee57e056108f87dfa36a7cef8a483f65de4ca yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -63,7 +63,6 @@
 
     def _read_particle_coords(self, chunks, ptf):
         # This will read chunks and yield the results.
-        rv = {}
         chunks = list(chunks)
         data_files = set([])
         for chunk in chunks:
@@ -226,7 +225,7 @@
         ind = {}
         for field in fields:
             mylog.debug("Allocating %s values for %s", psize[field[0]], field)
-            if field[1] in _vector_fields:
+            if field[1] in self._vector_fields:
                 shape = (psize[field[0]], 3)
             else:
                 shape = psize[field[0]]
@@ -262,10 +261,10 @@
             dt = "uint32"
         else:
             dt = "float32"
-        if name in _vector_fields:
+        if name in self._vector_fields:
             count *= 3
         arr = np.fromfile(f, dtype=dt, count = count)
-        if name in _vector_fields:
+        if name in self._vector_fields:
             arr = arr.reshape((count/3, 3), order="C")
         return arr.astype("float64")
 
@@ -310,7 +309,7 @@
                 if (ptype, field) not in field_list:
                     continue
                 offsets[(ptype, field)] = pos
-                if field in _vector_fields:
+                if field in self._vector_fields:
                     pos += 3 * pcount[ptype] * fs
                 else:
                     pos += pcount[ptype] * fs
@@ -382,7 +381,7 @@
         rv = {}
         for field in fields:
             mylog.debug("Allocating %s values for %s", size, field)
-            if field in _vector_fields:
+            if field in self._vector_fields:
                 rv[field] = np.empty((size, 3), dtype="float64")
                 if size == 0: continue
                 rv[field][:,0] = vals[field]['x'][mask]
@@ -400,16 +399,7 @@
                       self.domain_right_edge[i] - eps)
         return rv
 
-    def _read_particle_selection(self, chunks, selector, fields):
-        rv = {}
-        # We first need a set of masks for each particle type
-        ptf = defaultdict(list)
-        ptypes = set()
-        for ftype, fname in fields:
-            ptf[ftype].append(fname)
-            ptypes.add(ftype)
-        ptypes = list(ptypes)
-        ptypes.sort(key = lambda a: self._ptypes.index(a))
+    def _read_particle_coords(self, chunks, ptf):
         data_files = set([])
         for chunk in chunks:
             for obj in chunk.objs:
@@ -418,19 +408,36 @@
             poff = data_file.field_offsets
             tp = data_file.total_particles
             f = open(data_file.filename, "rb")
-            for ptype in ptypes:
+            for ptype, field_list in sorted(ptf.items(), key=lambda a: poff[a[0]]):
+                if tp[ptype] == 0: continue
+                f.seek(poff[ptype], os.SEEK_SET)
+                p = np.fromfile(f, self._pdtypes[ptype], count=tp[ptype])
+                d = [p['Coordinates'][ax].astype("float64") for ax in 'xyz']
+                del p
+                yield ptype, d
+
+    def _read_particle_fields(self, chunks, ptf, selector):
+        chunks = list(chunks)
+        data_files = set([])
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        for data_file in data_files:
+            poff = data_file.field_offsets
+            tp = data_file.total_particles
+            f = open(data_file.filename, "rb")
+            for ptype, field_list in sorted(ptf.items(), key=lambda a: poff[a[0]]):
                 f.seek(poff[ptype], os.SEEK_SET)
                 p = np.fromfile(f, self._pdtypes[ptype], count=tp[ptype])
                 mask = selector.select_points(
                     p['Coordinates']['x'].astype("float64"),
                     p['Coordinates']['y'].astype("float64"),
                     p['Coordinates']['z'].astype("float64"))
-                tf = self._fill_fields(ptf[ptype], p, mask)
-                for field in tf:
-                    rv[ptype, field] = tf[field]
-                del p, tf
+                if mask is None: continue
+                tf = self._fill_fields(field_list, p, mask)
+                for field in field_list:
+                    yield (ptype, field), tf.pop(field)
             f.close()
-        return rv
 
     def _initialize_index(self, data_file, regions):
         pf = data_file.pf
@@ -499,7 +506,7 @@
             if tp[ptype] == 0: continue
             dtbase = data_file.pf._field_dtypes.get(field, 'f')
             ff = "%s%s" % (data_file.pf.endian, dtbase)
-            if field in _vector_fields:
+            if field in self._vector_fields:
                 dt = (field, [('x', ff), ('y', ff), ('z', ff)])
             else:
                 dt = (field, ff)

diff -r 8c5074fceef2d66419bf09758a5f871a0a17ecb2 -r 883ee57e056108f87dfa36a7cef8a483f65de4ca yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -625,3 +625,11 @@
         return
     if not os.path.exists(my_dir):
         only_on_root(os.makedirs, my_dir)
+
+def set_intersection(some_list):
+    if len(some_list) == 0: return set([])
+    # This accepts a list of iterables, which we get the intersection of.
+    s = set(some_list[0])
+    for l in some_list[1:]:
+        s.intersection_update(l)
+    return s

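set_intersection is a small helper: given a list of iterables, it returns the
set of elements common to all of them, with an empty input list mapping to an
empty set. For example:

    from yt.funcs import set_intersection

    set_intersection([["a", "b", "c"], ["b", "c"], ("c", "b")])
    # -> set(["b", "c"])
    set_intersection([])
    # -> set([])
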
diff -r 8c5074fceef2d66419bf09758a5f871a0a17ecb2 -r 883ee57e056108f87dfa36a7cef8a483f65de4ca yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -132,10 +132,11 @@
             self.proj = self.overlap_proj
         self.object_types.sort()
 
-    def _setup_unknown_fields(self):
+    def _setup_unknown_fields(self, list_of_fields = None):
         known_fields = self.parameter_file._fieldinfo_known
-        mylog.debug("Checking %s", self.field_list)
-        for field in self.field_list:
+        field_list = list_of_fields or self.field_list
+        mylog.debug("Checking %s", field_list)
+        for field in field_list:
             # By allowing a backup, we don't mandate that it's found in our
             # current field info.  This means we'll instead simply override
             # it.

diff -r 8c5074fceef2d66419bf09758a5f871a0a17ecb2 -r 883ee57e056108f87dfa36a7cef8a483f65de4ca yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -125,7 +125,9 @@
         self.field_list = pfl
         pf = self.parameter_file
         pf.particle_types = tuple(set(pt for pt, pf in pfl))
-        pf.particle_types += ('all',)
+        # This is an attribute that means these particle types *actually*
+        # exist.  As in, they are real, in the dataset.
+        pf.particle_types_raw = pf.particle_types
 
     def _setup_classes(self):
         dd = self._get_data_reader_dict()

diff -r 8c5074fceef2d66419bf09758a5f871a0a17ecb2 -r 883ee57e056108f87dfa36a7cef8a483f65de4ca yt/utilities/io_handler.py
--- a/yt/utilities/io_handler.py
+++ b/yt/utilities/io_handler.py
@@ -123,16 +123,14 @@
         fsize = defaultdict(lambda: 0) # COUNT RV
         field_maps = defaultdict(list) # ptypes -> fields
         chunks = list(chunks)
+        unions = self.pf.particle_unions
         # What we need is a mapping from particle types to return types
-        ondisk_ptypes = [pt for pt in self.pf.particle_types
-                         if pt not in self.pf.known_filters
-                         and pt != "all"]
         for field in fields:
             ftype, fname = field
             fsize[field] = 0
             # We should add a check for pf.particle_unions or something here
-            if ftype == "all":
-                for pt in ondisk_ptypes:
+            if ftype in unions:
+                for pt in unions[ftype]:
                     ptf[pt].append(fname)
                     field_maps[pt, fname].append(field)
             else:
@@ -147,8 +145,8 @@
         # ptf, remember, is our mapping of what we want to read
         #for ptype in ptf:
         for field in fields:
-            if field[0] == "all":
-                for pt in ondisk_ptypes:
+            if field[0] in unions:
+                for pt in unions[field[0]]:
                     fsize[field] += psize[pt]
             else:
                 fsize[field] += psize[field[0]]

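This last hunk removes the hard-coded "all" special case from the base IO
handler: the field type is now looked up in pf.particle_unions, so any union,
not just "all", fans out to its member types when building ptf (what to read
from disk) and field_maps (which requested fields each read satisfies). A
self-contained sketch of that expansion with hypothetical inputs:

    from collections import defaultdict

    unions = {"all": ["PartType0", "PartType1"]}
    fields = [("all", "Masses"), ("PartType0", "Metallicity")]
    ptf = defaultdict(list)        # ptype -> on-disk field names
    field_maps = defaultdict(list) # disk (ptype, fname) -> requested fields
    for ftype, fname in fields:
        if ftype in unions:
            for pt in unions[ftype]:
                ptf[pt].append(fname)
                field_maps[pt, fname].append((ftype, fname))
        else:
            ptf[ftype].append(fname)
            field_maps[ftype, fname].append((ftype, fname))
    # ptf == {"PartType0": ["Masses", "Metallicity"],
    #         "PartType1": ["Masses"]}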

https://bitbucket.org/yt_analysis/yt-3.0/commits/def35d5a2e6d/
Changeset:   def35d5a2e6d
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-03 23:58:01
Summary:     More work on particle unions.
Affected #:  5 files

diff -r 883ee57e056108f87dfa36a7cef8a483f65de4ca -r def35d5a2e6d46d08f698477bec8e688b44f30e6 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -55,6 +55,7 @@
     storage_filename = None
     _particle_mass_name = None
     _particle_coordinates_name = None
+    _particle_velocity_name = None
 
     class __metaclass__(type):
         def __init__(cls, name, b, d):
@@ -325,7 +326,7 @@
         fields = [ (union.name, field) for field in fields]
         self.h.field_list.extend(fields)
         self.h._setup_unknown_fields(fields)
-        self.h._setup_particle_fields(union.name)
+        self.h._setup_particle_types([union.name])
 
     @property
     def particle_fields_by_type(self):

diff -r 883ee57e056108f87dfa36a7cef8a483f65de4ca -r def35d5a2e6d46d08f698477bec8e688b44f30e6 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -42,6 +42,11 @@
     KnownGadgetHDF5Fields, \
     TipsyFieldInfo, \
     KnownTipsyFields
+from yt.data_objects.field_info_container import \
+    NullFunc, \
+    TranslationFunc
+from yt.data_objects.particle_fields import \
+    particle_deposition_functions
 
 
 class ParticleFile(object):
@@ -115,14 +120,19 @@
             for unit in ur:
                 ud[unit] = ur[unit] / base
 
+    def _setup_particle_type(self, ptype):
+        self.field_info.add_field((ptype, "particle_index"),
+            function = TranslationFunc((ptype, "ParticleIDs")),
+            particle_type = True)
 
 class GadgetStaticOutput(ParticleStaticOutput):
     _hierarchy_class = ParticleGeometryHandler
     _file_class = GadgetBinaryFile
     _fieldinfo_fallback = GadgetFieldInfo
     _fieldinfo_known = KnownGadgetFields
-    _particle_mass_name = "Mass"
+    _particle_mass_name = "Masses"
     _particle_coordinates_name = "Coordinates"
+    _particle_velocity_name = "Velocities"
     _suffix = ""
     _header_spec = (('Npart', 6, 'i'),
                     ('Massarr', 6, 'd'),
@@ -265,6 +275,7 @@
         # We do not allow load() of these files.
         return False
 
+
 class GadgetHDF5StaticOutput(GadgetStaticOutput):
     _file_class = ParticleFile
     _fieldinfo_fallback = GadgetHDF5FieldInfo
@@ -305,6 +316,7 @@
         return False
 
 class OWLSStaticOutput(GadgetHDF5StaticOutput):
+    _particle_mass_name = "Mass"
 
     def _parse_parameter_file(self):
         handle = h5py.File(self.parameter_filename, mode="r")

diff -r 883ee57e056108f87dfa36a7cef8a483f65de4ca -r def35d5a2e6d46d08f698477bec8e688b44f30e6 yt/frontends/sph/fields.py
--- a/yt/frontends/sph/fields.py
+++ b/yt/frontends/sph/fields.py
@@ -91,50 +91,6 @@
     TipsyFieldInfo.add_field(("all", fname), function=NullFunc,
             particle_type = True)
 
-# GADGET
-# ======
-
-# Among other things we need to set up Coordinates
-
-def _setup_gadget_fields(ptypes, field_registry, known_registry):
-
-    # This has to be done manually for Gadget, because some of the particles will
-    # have uniform mass
-    for fname in ["Coordinates", "Velocities", "ParticleIDs",
-                  "Masses", "Mass", "particle_index"]:
-        field_registry.add_field(("all", fname), function=NullFunc,
-                particle_type = True)
-
-    for ptype in ptypes:
-        known_registry.add_field((ptype, "Masses"), function=NullFunc,
-            particle_type = True,
-            convert_function=_get_conv("mass"),
-            units = r"\mathrm{g}")
-        known_registry.add_field((ptype, "Velocities"), function=NullFunc,
-            particle_type = True,
-            convert_function=_get_conv("velocity"),
-            units = r"\mathrm{cm}/\mathrm{s}")
-        particle_deposition_functions(ptype, "Coordinates", "Masses", field_registry)
-        particle_scalar_functions(ptype, "Coordinates", "Velocities", field_registry)
-        known_registry.add_field((ptype, "Coordinates"), function=NullFunc,
-            particle_type = True)
-        # Now we add some translations.
-        field_registry.add_field( (ptype, "particle_index"),
-            function = TranslationFunc((ptype, "ParticleIDs")),
-            particle_type = True)
-    particle_deposition_functions("all", "Coordinates", "Masses", field_registry)
-
-    # Now we have to manually apply the splits for "all", since we don't want to
-    # use the splits defined above.
-
-# Note that we call the same function a few times here.
-_setup_gadget_fields(gadget_ptypes,
-    GadgetFieldInfo,
-    KnownGadgetFields)
-_setup_gadget_fields(ghdf5_ptypes,
-    GadgetHDF5FieldInfo,
-    KnownGadgetHDF5Fields)
-
 def SmoothedGas(field, data):
     pos = data["PartType0", "Coordinates"]
     sml = data["PartType0", "SmoothingLength"]

diff -r 883ee57e056108f87dfa36a7cef8a483f65de4ca -r def35d5a2e6d46d08f698477bec8e688b44f30e6 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -94,7 +94,8 @@
                 del coords
                 if mask is None: continue
                 for field in field_list:
-                    if field == "Masses" and ptype not in self.var_mass:
+                    if field in ("Masses", "Mass") and \
+                        ptype not in self.var_mass:
                         data = np.empty(mask.sum(), dtype="float64")
                         ind = self._known_ptypes.index(ptype) 
                         data[:] = self.pf["Massarr"][ind]
@@ -134,19 +135,21 @@
     def _identify_fields(self, data_file):
         f = _get_h5_handle(data_file.filename)
         fields = []
+        cname = self.pf._particle_coordinates_name
+        mname = self.pf._particle_mass_name
         for key in f.keys():
             if not key.startswith("PartType"): continue
             g = f[key]
-            if "Coordinates" not in g: continue
+            if cname not in g: continue
             #ptype = int(key[8:])
             ptype = str(key)
             for k in g.keys():
                 if not hasattr(g[k], "shape"): continue
                 # str => not unicode!
                 fields.append((ptype, str(k)))
-            if "Masses" not in g.keys():
+            if mname not in g.keys():
                 # We'll append it anyway.
-                fields.append((ptype, "Masses"))
+                fields.append((ptype, mname))
         f.close()
         return fields
 

diff -r 883ee57e056108f87dfa36a7cef8a483f65de4ca -r def35d5a2e6d46d08f698477bec8e688b44f30e6 yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -31,7 +31,8 @@
 from yt.data_objects.field_info_container import \
     NullFunc
 from yt.data_objects.particle_fields import \
-    particle_deposition_functions
+    particle_deposition_functions, \
+    particle_scalar_functions
 from yt.utilities.io_handler import io_registry
 from yt.utilities.logger import ytLogger as mylog
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
@@ -73,6 +74,9 @@
         mylog.debug("Detecting fields in backup.")
         self._detect_fields_backup()
 
+        mylog.debug("Setting up particle fields")
+        self._setup_particle_types()
+
         mylog.debug("Adding unknown detected fields")
         self._setup_unknown_fields()
 
@@ -132,6 +136,34 @@
             self.proj = self.overlap_proj
         self.object_types.sort()
 
+    def _setup_particle_types(self, ptypes = None):
+        mname = self.pf._particle_mass_name
+        cname = self.pf._particle_coordinates_name
+        vname = self.pf._particle_velocity_name
+        # We require overriding if any of this is true
+        if None in (mname, cname, vname): return
+        if ptypes is None: ptypes = self.pf.particle_types_raw
+        fi = self.pf.field_info
+        def _get_conv(cf):
+            def _convert(data):
+                return data.convert(cf)
+            return _convert
+        for ptype in ptypes:
+            fi.add_field((ptype, vname), function=NullFunc,
+                particle_type = True,
+                convert_function=_get_conv("velocity"),
+                units = r"\mathrm{cm}/\mathrm{s}")
+            fi.add_field((ptype, mname), function=NullFunc,
+                particle_type = True,
+                convert_function=_get_conv("mass"),
+                units = r"\mathrm{g}")
+            particle_deposition_functions(ptype, cname, mname, fi)
+            particle_scalar_functions(ptype, cname, vname, fi)
+            fi.add_field((ptype, cname), function=NullFunc,
+                         particle_type = True)
+            # Now we add some translations.
+            self.pf._setup_particle_type(ptype)
+
     def _setup_unknown_fields(self, list_of_fields = None):
         known_fields = self.parameter_file._fieldinfo_known
         field_list = list_of_fields or self.field_list

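_setup_particle_types turns the per-frontend field boilerplate into a naming
contract: any frontend whose StaticOutput sets _particle_mass_name,
_particle_coordinates_name and _particle_velocity_name gets mass and velocity
fields, deposition functions and scalar functions registered automatically
for each raw particle type, while leaving any of them None opts out and
requires an override. A hypothetical new frontend would then only need
something like this (import paths follow the diffs above):

    from yt.frontends.sph.data_structures import ParticleStaticOutput
    from yt.data_objects.field_info_container import TranslationFunc

    class MyCodeStaticOutput(ParticleStaticOutput):  # hypothetical
        _particle_mass_name = "Masses"
        _particle_coordinates_name = "Coordinates"
        _particle_velocity_name = "Velocities"

        def _setup_particle_type(self, ptype):
            # Per-type extras, e.g. particle_index <- ParticleIDs.
            self.field_info.add_field((ptype, "particle_index"),
                function=TranslationFunc((ptype, "ParticleIDs")),
                particle_type=True)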

https://bitbucket.org/yt_analysis/yt-3.0/commits/aec630f6bfc1/
Changeset:   aec630f6bfc1
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-04 00:07:18
Summary:     Passing over_refine_factor through load_octree.
Affected #:  2 files

diff -r def35d5a2e6d46d08f698477bec8e688b44f30e6 -r aec630f6bfc1ba44d3610cb76bc2a41e55a216c9 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -1035,7 +1035,8 @@
         header = dict(dims = self.pf.domain_dimensions/2,
                       left_edge = self.pf.domain_left_edge,
                       right_edge = self.pf.domain_right_edge,
-                      octree = self.pf.octree_mask)
+                      octree = self.pf.octree_mask,
+                      over_refine = self.pf.over_refine_factor)
         self.oct_handler = OctreeContainer.load_octree(header)
 
     def _identify_base_chunk(self, dobj):
@@ -1083,7 +1084,8 @@
     _data_style = "stream_octree"
 
 def load_octree(octree_mask, domain_dimensions, data, sim_unit_to_cm,
-                bbox=None, sim_time=0.0, periodicity=(True, True, True)):
+                bbox=None, sim_time=0.0, periodicity=(True, True, True),
+                over_refine_factor = 1):
     r"""Load an octree mask into yt.
 
     Octrees can be saved out by calling save_octree on an OctreeContainer.
@@ -1159,6 +1161,7 @@
     spf.units['1'] = 1.0
     spf.units["unitary"] = 1.0
     box_in_mpc = sim_unit_to_cm / mpc_conversion['cm']
+    spf.over_refine_factor = over_refine_factor
     for unit in mpc_conversion.keys():
         spf.units[unit] = mpc_conversion[unit] * box_in_mpc
 

diff -r def35d5a2e6d46d08f698477bec8e688b44f30e6 -r aec630f6bfc1ba44d3610cb76bc2a41e55a216c9 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -119,7 +119,8 @@
     def load_octree(cls, header):
         cdef np.ndarray[np.uint8_t, ndim=1] ref_mask
         ref_mask = header['octree']
-        cdef OctreeContainer obj = cls(header['dims'], header['left_edge'], header['right_edge'])
+        cdef OctreeContainer obj = cls(header['dims'], header['left_edge'],
+                header['right_edge'], over_refine = header['over_refine'])
         # NOTE: We do not allow domain/file indices to be specified.
         cdef SelectorObject selector = selection_routines.AlwaysSelector(None)
         cdef OctVisitorData data
@@ -166,7 +167,9 @@
                 pos[1] += dds[1]
             pos[0] += dds[0]
         obj.nocts = cur.n_assigned
-        assert(obj.nocts == ref_mask.size)
+        if obj.nocts != ref_mask.size:
+            raise RuntimeError(
+                "Octree mismatch on load: ref_mask.size = %s, nocts = %s, "
+                "oref = %s" % (ref_mask.size, obj.nocts, obj.oref))
         return obj
 
     cdef void setup_data(self, OctVisitorData *data, int domain_id = -1):
@@ -491,7 +494,8 @@
         # Get the header
         header = dict(dims = (self.nn[0], self.nn[1], self.nn[2]),
                       left_edge = (self.DLE[0], self.DLE[1], self.DLE[2]),
-                      right_edge = (self.DRE[0], self.DRE[1], self.DRE[2]))
+                      right_edge = (self.DRE[0], self.DRE[1], self.DRE[2]),
+                      over_refine = self.oref)
         if self.partial_coverage == 1:
             raise NotImplementedError
         cdef SelectorObject selector = selection_routines.AlwaysSelector(None)


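With over_refine carried in the header, a serialized octree now round-trips
at the correct refinement: save_octree() records oref alongside dims and the
domain edges (the mask itself travels under the 'octree' key), and
load_octree() feeds it back to the container constructor. Note also that the
size check on load is now an explicit RuntimeError rather than an assert, so
it survives running under python -O. Sketch of the container-level round
trip:

    header = octree.save_octree()   # includes 'over_refine'
    loaded = OctreeContainer.load_octree(header)
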
https://bitbucket.org/yt_analysis/yt-3.0/commits/0c13f24e46dd/
Changeset:   0c13f24e46dd
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-04 00:29:58
Summary:     More over_refine_factor and stream refactoring.
Affected #:  3 files

diff -r aec630f6bfc1ba44d3610cb76bc2a41e55a216c9 -r 0c13f24e46dd698d5217fe51d8569756a6b9a894 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -48,7 +48,8 @@
 
     default_fluid_type = "gas"
     fluid_types = ("gas","deposit")
-    particle_types = None
+    particle_types = ()
+    particle_types_raw = ()
     geometry = "cartesian"
     coordinates = None
     max_level = 99

diff -r aec630f6bfc1ba44d3610cb76bc2a41e55a216c9 -r 0c13f24e46dd698d5217fe51d8569756a6b9a894 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -231,7 +231,7 @@
         if self.stream_handler.io is not None:
             self.io = self.stream_handler.io
         else:
-            self.io = io_registry[self.data_style](self.stream_handler)
+            self.io = io_registry[self.data_style](self.pf)
 
     def update_data(self, data) :
 
@@ -731,7 +731,7 @@
         if self.stream_handler.io is not None:
             self.io = self.stream_handler.io
         else:
-            self.io = io_registry[self.data_style](self.stream_handler)
+            self.io = io_registry[self.data_style](self.pf)
 
 class StreamParticleFile(ParticleFile):
     pass
@@ -883,7 +883,7 @@
         if self.stream_handler.io is not None:
             self.io = self.stream_handler.io
         else:
-            self.io = io_registry[self.data_style](self.stream_handler)
+            self.io = io_registry[self.data_style](self.pf)
 
     def _detect_fields(self):
         self.field_list = list(set(self.stream_handler.get_fields()))
@@ -988,9 +988,9 @@
 class StreamOctreeSubset(OctreeSubset):
     domain_id = 1
     _domain_offset = 1
-    _num_zones = 2
 
-    def __init__(self, base_region, pf, oct_handler):
+    def __init__(self, base_region, pf, oct_handler, over_refine_factor = 1):
+        self._num_zones = 1 << (over_refine_factor)
         self.field_data = YTFieldData()
         self.field_parameters = {}
         self.pf = pf
@@ -1029,10 +1029,10 @@
         if self.stream_handler.io is not None:
             self.io = self.stream_handler.io
         else:
-            self.io = io_registry[self.data_style](self.stream_handler)
+            self.io = io_registry[self.data_style](self.pf)
 
     def _initialize_oct_handler(self):
-        header = dict(dims = self.pf.domain_dimensions/2,
+        header = dict(dims = [1, 1, 1],
                       left_edge = self.pf.domain_left_edge,
                       right_edge = self.pf.domain_right_edge,
                       octree = self.pf.octree_mask,
@@ -1043,7 +1043,8 @@
         if getattr(dobj, "_chunk_info", None) is None:
             base_region = getattr(dobj, "base_region", dobj)
             subset = [StreamOctreeSubset(base_region, self.parameter_file,
-                                         self.oct_handler)]
+                                         self.oct_handler,
+                                         self.pf.over_refine_factor)]
             dobj._chunk_info = subset
         dobj._current_chunk = list(self._chunk_all(dobj))[0]
 
@@ -1083,7 +1084,7 @@
     _fieldinfo_known = KnownStreamFields
     _data_style = "stream_octree"
 
-def load_octree(octree_mask, domain_dimensions, data, sim_unit_to_cm,
+def load_octree(octree_mask, data, sim_unit_to_cm,
                 bbox=None, sim_time=0.0, periodicity=(True, True, True),
                 over_refine_factor = 1):
     r"""Load an octree mask into yt.
@@ -1098,8 +1099,6 @@
     ----------
     octree_mask : np.ndarray[uint8_t]
         This is a depth-first refinement mask for an Octree.
-    domain_dimensions : array_like
-        This is the domain dimensions of the grid
     data : dict
         A dictionary of 1D arrays.  Note that these must be the same size as the
         number of "False" values in the ``octree_mask``.
@@ -1115,7 +1114,8 @@
 
     """
 
-    domain_dimensions = np.array(domain_dimensions)
+    nz = (1 << (over_refine_factor))
+    domain_dimensions = np.array([nz, nz, nz])
     nprocs = 1
     if bbox is None:
         bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], 'float64')
@@ -1204,7 +1204,7 @@
         if self.stream_handler.io is not None:
             self.io = self.stream_handler.io
         else:
-            self.io = io_registry[self.data_style](self.stream_handler)
+            self.io = io_registry[self.data_style](self.pf)
 
     def _detect_fields(self):
         self.field_list = list(set(self.stream_handler.get_fields()))

diff -r aec630f6bfc1ba44d3610cb76bc2a41e55a216c9 -r 0c13f24e46dd698d5217fe51d8569756a6b9a894 yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -29,9 +29,9 @@
 
     _data_style = "stream"
 
-    def __init__(self, stream_handler):
-        self.fields = stream_handler.fields
-        BaseIOHandler.__init__(self)
+    def __init__(self, pf):
+        self.fields = pf.stream_handler.fields
+        super(IOHandlerStream, self).__init__(pf)
 
     def _read_data_set(self, grid, field):
         # This is where we implement processor-locking
@@ -123,9 +123,9 @@
 
     _data_style = "stream_particles"
 
-    def __init__(self, stream_handler):
-        self.fields = stream_handler.fields
-        BaseIOHandler.__init__(self)
+    def __init__(self, pf):
+        self.fields = pf.stream_handler.fields
+        super(StreamParticleIOHandler, self).__init__(pf)
 
     def _read_particle_selection(self, chunks, selector, fields):
         rv = {}
@@ -203,9 +203,9 @@
 class IOHandlerStreamHexahedral(BaseIOHandler):
     _data_style = "stream_hexahedral"
 
-    def __init__(self, stream_handler):
-        self.fields = stream_handler.fields
-        BaseIOHandler.__init__(self)
+    def __init__(self, pf):
+        self.fields = pf.stream_handler.fields
+        super(IOHandlerStreamHexahedral, self).__init__(pf)
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
         chunks = list(chunks)
@@ -232,9 +232,9 @@
 class IOHandlerStreamOctree(BaseIOHandler):
     _data_style = "stream_octree"
 
-    def __init__(self, stream_handler):
-        self.fields = stream_handler.fields
-        BaseIOHandler.__init__(self)
+    def __init__(self, pf):
+        self.fields = pf.stream_handler.fields
+        super(IOHandlerStreamOctree, self).__init__(pf)
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
         rv = {}
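
Since the domain dimensions are now derived internally as nz = 1 << over_refine_factor, the argument disappears from the call. A hedged sketch of the new signature in use, assuming a depth-first mask in which 1 marks a refined oct, 0 a leaf, and one data value per leaf:

    import numpy as np
    from yt.frontends.stream.data_structures import load_octree

    # Root oct refines once into eight leaves (assumed mask layout).
    octree_mask = np.array([1, 0, 0, 0, 0, 0, 0, 0, 0], dtype="uint8")
    data = {"Density": np.random.random(8)}  # one value per zero entry
    spf = load_octree(octree_mask, data, sim_unit_to_cm=3.08e24,
                      over_refine_factor=1)
    # Internally: nz = 1 << 1 = 2, so domain_dimensions == [2, 2, 2].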


https://bitbucket.org/yt_analysis/yt-3.0/commits/f816923abcb6/
Changeset:   f816923abcb6
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-04 00:53:57
Summary:     A few more oref == 0 changes.
Affected #:  2 files

diff -r 0c13f24e46dd698d5217fe51d8569756a6b9a894 -r f816923abcb6e57acdccfa3378ceb4170e2a873f yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -301,6 +301,8 @@
             else:
                 next = NULL
         if oinfo == NULL: return cur
+        # Compute the cell-width factor; guard the shift so oref == 0
+        # (one cell per oct) never produces a negative shift count.
+        cdef np.float64_t factor = 1.0
+        if self.oref > 0: factor = 1.0 / (1 << (self.oref - 1))
         for i in range(3):
             # This will happen *after* we quit out, so we need to back out the
             # last change to cp
@@ -312,9 +314,11 @@
             # from the oct width, thus making it already the cell width.
             # But, for some cases where the oref != 1, this needs to be
             # changed.
-            oinfo.dds[i] = dds[i] / (1 << (self.oref-1)) # Cell width
+            oinfo.dds[i] = dds[i] * factor # Cell width
             oinfo.left_edge[i] = cp[i] - dds[i] # Center minus dds
             oinfo.ipos[i] = ipos[i]
+        if self.oref == 0:
+            for i in range(3):
+                oinfo.dds[i] = dds[i] # One cell per oct: cell width == oct width
         oinfo.level = level
         return cur
 

diff -r 0c13f24e46dd698d5217fe51d8569756a6b9a894 -r f816923abcb6e57acdccfa3378ceb4170e2a873f yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -233,11 +233,16 @@
                             data.pos[1] = (data.pos[1] >> 1)
                             data.pos[2] = (data.pos[2] >> 1)
                             data.level -= 1
-                        elif this_level == 1:
+                        elif this_level == 1 and data.oref > 0:
                             data.global_index += increment
                             increment = 0
                             self.visit_oct_cells(data, root, ch, spos, sdds,
                                                  func, i, j, k)
+                        elif this_level == 1 and increment == 1:
+                            data.global_index += increment
+                            increment = 0
+                            data.ind[0] = data.ind[1] = data.ind[2] = 0
+                            func(root, data, 1)
                         spos[2] += sdds[2]
                     spos[1] += sdds[1]
                 spos[0] += sdds[0]
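
The intent of the factor logic is easier to see outside Cython: the dds handed in has already been halved once relative to the oct width, so oref == 1 is the identity, larger values halve further, and oref == 0 means one cell per oct. A plain-Python sketch of the same arithmetic:

    def cell_width(dds, oref):
        # dds: width already halved once from the oct width (see the
        # comment in the hunk above); oref: over-refinement level.
        if oref == 0:
            return dds
        return dds / (1 << (oref - 1))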


https://bitbucket.org/yt_analysis/yt-3.0/commits/fbea3834d2f5/
Changeset:   fbea3834d2f5
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-04 02:08:55
Summary:     Merging
Affected #:  0 files



https://bitbucket.org/yt_analysis/yt-3.0/commits/6526995ce400/
Changeset:   6526995ce400
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-04 17:33:06
Summary:     This fixes the tests by setting a default particle type.  Additional answer tests coming.
Affected #:  1 file

diff -r fbea3834d2f59616a590fedd41cefe27bcc8ba30 -r 6526995ce400d7cdf8bf17305995356ee77be80c yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -48,8 +48,8 @@
 
     default_fluid_type = "gas"
     fluid_types = ("gas","deposit")
-    particle_types = ()
-    particle_types_raw = ()
+    particle_types = ("all",) # By default we have an 'all'
+    particle_types_raw = ("all",)
     geometry = "cartesian"
     coordinates = None
     max_level = 99


https://bitbucket.org/yt_analysis/yt-3.0/commits/d01dc8c3be78/
Changeset:   d01dc8c3be78
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-04 17:50:14
Summary:     Adding a hook to call particle union after hierarchy is instantiated.
Affected #:  2 files

diff -r 6526995ce400d7cdf8bf17305995356ee77be80c -r d01dc8c3be7870f6ffc603fa066bb81ac8a959bc yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -29,6 +29,8 @@
     FieldInfoContainer, NullFunc
 from yt.data_objects.particle_filters import \
     filter_registry
+from yt.data_objects.particle_unions import \
+    ParticleUnion
 from yt.utilities.minimal_representation import \
     MinimalStaticOutput
 
@@ -216,6 +218,10 @@
                 raise RuntimeError("You should not instantiate StaticOutput.")
             self._instantiated_hierarchy = self._hierarchy_class(
                 self, data_style=self.data_style)
+            # Now we do things that we need an instantiated hierarchy for
+            if "all" not in self.particle_types:
+                pu = ParticleUnion("all", list(self.particle_types_raw))
+                self.add_particle_union(pu)
         return self._instantiated_hierarchy
     h = hierarchy  # alias
 

diff -r 6526995ce400d7cdf8bf17305995356ee77be80c -r d01dc8c3be7870f6ffc603fa066bb81ac8a959bc yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -35,10 +35,6 @@
     _data_style = "enzo_packed_3d"
     _base = slice(None)
 
-    def __init__(self, pf, *args, **kwargs):
-        BaseIOHandler.__init__(self, *args, **kwargs)
-        self.pf = pf
-
     def _read_field_names(self, grid):
         return hdf5_light_reader.ReadListOfDatasets(
                     grid.filename, "/Grid%08i" % grid.id)
@@ -204,9 +200,8 @@
 class IOHandlerPackedHDF5GhostZones(IOHandlerPackedHDF5):
     _data_style = "enzo_packed_3d_gz"
 
-    def __init__(self, pf, *args, **kwargs):
-        BaseIOHandler.__init__(self, *args, **kwargs)
-        self.pf = pf
+    def __init__(self, *args, **kwargs):
+        super(IOHandlerPackedHDF5GhostZones, self).__init__(*args, **kwargs)
         NGZ = self.pf.parameters.get("NumberOfGhostZones", 3)
         self._base = (slice(NGZ, -NGZ),
                       slice(NGZ, -NGZ),
@@ -229,7 +224,7 @@
         self.my_slice = (slice(ghost_zones,-ghost_zones),
                       slice(ghost_zones,-ghost_zones),
                       slice(ghost_zones,-ghost_zones))
-        BaseIOHandler.__init__(self)
+        BaseIOHandler.__init__(self, pf)
 
     def _read_data_set(self, grid, field):
         if grid.id not in self.grids_in_memory:
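
A hedged sketch of what the new hook does once the hierarchy exists; `pf` stands in for a hypothetical StaticOutput whose raw particle types have already been detected:

    from yt.data_objects.particle_unions import ParticleUnion

    pu = ParticleUnion("all", list(pf.particle_types_raw))
    pf.add_particle_union(pu)  # ("all", field) now aggregates every raw type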


https://bitbucket.org/yt_analysis/yt-3.0/commits/2fedeb34c567/
Changeset:   2fedeb34c567
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-04 20:00:18
Summary:     Move around how particle unions are created and avoid recreating objects.
Affected #:  2 files

diff -r d01dc8c3be7870f6ffc603fa066bb81ac8a959bc -r 2fedeb34c567e6c069805ced5dbdba42a6e96bfb yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -59,6 +59,8 @@
     _particle_mass_name = None
     _particle_coordinates_name = None
     _particle_velocity_name = None
+    particle_unions = None
+    known_filters = None
 
     class __metaclass__(type):
         def __init__(cls, name, b, d):
@@ -95,8 +97,8 @@
         self.file_style = file_style
         self.conversion_factors = {}
         self.parameters = {}
-        self.known_filters = {}
-        self.particle_unions = {}
+        self.known_filters = self.known_filters or {}
+        self.particle_unions = self.particle_unions or {}
 
         # path stuff
         self.parameter_filename = str(filename)

diff -r d01dc8c3be7870f6ffc603fa066bb81ac8a959bc -r 2fedeb34c567e6c069805ced5dbdba42a6e96bfb yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -74,12 +74,12 @@
         mylog.debug("Detecting fields in backup.")
         self._detect_fields_backup()
 
+        mylog.debug("Adding unknown detected fields")
+        self._setup_unknown_fields()
+
         mylog.debug("Setting up particle fields")
         self._setup_particle_types()
 
-        mylog.debug("Adding unknown detected fields")
-        self._setup_unknown_fields()
-
         mylog.debug("Setting up derived fields")
         self._setup_derived_fields()
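
The `x = x or {}` idiom above deserves a note: __init__ can run more than once, and a subclass may pre-populate the attribute, so this pattern keeps any existing registry instead of silently replacing it. A sketch of the idiom in isolation:

    class Example(object):
        # Class-level sentinel; a dict literal here would be shared and
        # mutated across every instance of every subclass.
        particle_unions = None

        def __init__(self):
            # Reuse whatever a subclass or an earlier __init__ already
            # attached; only build a fresh dict the first time through.
            self.particle_unions = self.particle_unions or {}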
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/b22329f800b4/
Changeset:   b22329f800b4
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-04 20:32:42
Summary:     A few more test fixes
Affected #:  2 files

diff -r 2fedeb34c567e6c069805ced5dbdba42a6e96bfb -r b22329f800b44ab26e61ff223c1a4867c94d0d84 yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -24,11 +24,10 @@
     _particle_reader = False
     _data_style = "flash_hdf5"
 
-    def __init__(self, pf, *args, **kwargs):
+    def __init__(self, pf, **kwargs):  # kwargs still feeds num_per_stride below
         self._num_per_stride = kwargs.pop("num_per_stride", 1000000)
-        BaseIOHandler.__init__(self, *args, **kwargs)
+        super(IOHandlerFLASH, self).__init__(pf)
         # Now we cache the particle fields
-        self.pf = pf
         self._handle = pf._handle
         self._particle_handle = pf._particle_handle
         

diff -r 2fedeb34c567e6c069805ced5dbdba42a6e96bfb -r b22329f800b44ab26e61ff223c1a4867c94d0d84 yt/frontends/moab/io.py
--- a/yt/frontends/moab/io.py
+++ b/yt/frontends/moab/io.py
@@ -23,10 +23,8 @@
 class IOHandlerMoabH5MHex8(BaseIOHandler):
     _data_style = "moab_hex8"
 
-    def __init__(self, pf, *args, **kwargs):
-        # TODO check if _num_per_stride is needed
-        BaseIOHandler.__init__(self, *args, **kwargs)
-        self.pf = pf
+    def __init__(self, pf):
+        super(IOHandlerMoabH5MHex8, self).__init__(pf)
         self._handle = pf._handle
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
@@ -52,11 +50,6 @@
 class IOHandlerMoabPyneHex8(BaseIOHandler):
     _data_style = "moab_hex8_pyne"
 
-    def __init__(self, pf, *args, **kwargs):
-        # TODO check if _num_per_stride is needed
-        BaseIOHandler.__init__(self, *args, **kwargs)
-        self.pf = pf
-
     def _read_fluid_selection(self, chunks, selector, fields, size):
         chunks = list(chunks)
         assert(len(chunks) == 1)


https://bitbucket.org/yt_analysis/yt-3.0/commits/e866d950539a/
Changeset:   e866d950539a
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-08 16:58:45
Summary:     Beginning conversion of Enzo to new IO method, switching to "io".
Affected #:  3 files

diff -r b22329f800b44ab26e61ff223c1a4867c94d0d84 -r e866d950539aee7c54f517248db397b557465512 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -50,8 +50,8 @@
 
     default_fluid_type = "gas"
     fluid_types = ("gas","deposit")
-    particle_types = ("all",) # By default we have an 'all'
-    particle_types_raw = ("all",)
+    particle_types = ("io",) # By default we have an 'all'
+    particle_types_raw = ("io",)
     geometry = "cartesian"
     coordinates = None
     max_level = 99

diff -r b22329f800b44ab26e61ff223c1a4867c94d0d84 -r e866d950539aee7c54f517248db397b557465512 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -892,13 +892,16 @@
         else:
             self.current_redshift = self.omega_lambda = self.omega_matter = \
                 self.hubble_constant = self.cosmological_simulation = 0.0
-        self.particle_types = ["enzo"]
+        self.particle_types = ["io"]
         for ptype in self.parameters.get("AppendActiveParticleType", []):
             self.particle_types.append(ptype)
         if self.parameters["NumberOfParticles"] > 0 and \
             "AppendActiveParticleType" in self.parameters.keys():
-            self.particle_types.append("DarkMatter")
+            # If this is the case, then we know we should have a DarkMatter
+            # particle type, and we don't need the "io" type.
+            self.particle_types = ["DarkMatter"]
             self.parameters["AppendActiveParticleType"].append("DarkMatter")
+        self.particle_types = tuple(self.particle_types)
 
         if self.dimensionality == 1:
             self._setup_1d()

diff -r b22329f800b44ab26e61ff223c1a4867c94d0d84 -r e866d950539aee7c54f517248db397b557465512 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -43,7 +43,57 @@
     def _read_exception(self):
         return (exceptions.KeyError, hdf5_light_reader.ReadingError)
 
-    def _read_particle_selection_by_type(self, chunks, selector, fields):
+    def _read_particle_coords(self, chunks, ptf):
+        chunks = list(chunks)
+        for chunk in chunks: # These should be organized by grid filename
+            f = None
+            for g in chunk.objs:
+                if f is None:
+                    mylog.debug("Opening (count) %s", g.filename)
+                    f = h5py.File(g.filename, "r")
+                if g.NumberOfParticles == 0: continue
+                ds = f["/Grid%08i" % g.id]
+                for ptype, field_list in sorted(ptf.items()):
+                    if ptype != "io":
+                        if g.NumberOfActiveParticles[ptype] == 0: continue
+                        pds = ds["Particles/%s" % ptype]
+                    else:
+                        pds = ds
+                    pn = _particle_position_names.get(ptypes[0],
+                            r"particle_position_%s")
+                    x, y, z = (pds[pn % ax][:] for ax in 'xyz')
+                    yield ptype, (x, y, z)
+            f.close()
+
+    def _read_particle_fields(self, chunks, ptf, selector):
+        chunks = list(chunks)
+        for chunk in chunks: # These should be organized by grid filename
+            f = None
+            for g in chunk.objs:
+                if f is None:
+                    mylog.debug("Opening (read) %s", g.filename)
+                    f = h5py.File(g.filename, "r")
+                if g.NumberOfParticles == 0: continue
+                ds = f["/Grid%08i" % g.id]
+                for ptype, field_list in sorted(ptf.items()):
+                    if ptype != "io":
+                        if g.NumberOfActiveParticles[ptype] == 0: continue
+                        pds = ds["Particles/%s" % ptype]
+                    else:
+                        pds = ds
+                    pn = _particle_position_names.get(ptypes[0],
+                            r"particle_position_%s")
+                    x, y, z = (pds[pn % ax][:] for ax in 'xyz')
+                    mask = selector.select_points(x, y, z)
+                    if mask is None: continue
+                    for field in field_list:
+                        data = pds[field]
+                        if field in _convert_mass:
+                            data *= g.dds.prod()
+                        yield (ptype, field), data
+            f.close()
+
+    def __read_particle_selection_by_type(self, chunks, selector, fields):
         rv = {}
         ptypes = list(set([ftype for ftype, fname in fields]))
         fields = list(set(fields))
@@ -83,7 +133,7 @@
                 data.pop(g.id)
         return rv
 
-    def _read_particle_selection(self, chunks, selector, fields):
+    def __read_particle_selection(self, chunks, selector, fields):
         last = None
         rv = {}
         chunks = list(chunks)
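
The two new generator methods define the protocol the refactored IO layer expects: a cheap coordinates pass for counting and allocation, then a field pass that applies the selector. A hedged skeleton, with the `_load_*` helpers hypothetical:

    from yt.utilities.io_handler import BaseIOHandler

    class IOHandlerSketch(BaseIOHandler):
        _data_style = "sketch"

        def _read_particle_coords(self, chunks, ptf):
            # Pass 1: positions only, so callers can count and allocate.
            for chunk in chunks:
                for ptype in sorted(ptf):
                    x, y, z = self._load_positions(chunk, ptype)  # hypothetical
                    yield ptype, (x, y, z)

        def _read_particle_fields(self, chunks, ptf, selector):
            # Pass 2: mask with the selector, then hand back field values.
            for chunk in chunks:
                for ptype, field_list in sorted(ptf.items()):
                    x, y, z = self._load_positions(chunk, ptype)
                    mask = selector.select_points(x, y, z)
                    if mask is None:
                        continue
                    for field in field_list:
                        vals = self._load_field(chunk, ptype, field)  # hypothetical
                        yield (ptype, field), vals[mask]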


https://bitbucket.org/yt_analysis/yt-3.0/commits/2f7946397ed5/
Changeset:   2f7946397ed5
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-08 19:04:46
Summary:     Rewriting some of the Enzo IO system.
Affected #:  2 files

diff -r e866d950539aee7c54f517248db397b557465512 -r 2f7946397ed53cdb823661cb14e44e96e5a1d80a yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -422,7 +422,7 @@
                 continue
             gs = self.grids[select_grids > 0]
             g = gs[0]
-            handle = h5py.File(g.filename)
+            handle = h5py.File(g.filename, "r")
             node = handle["/Grid%08i/Particles/" % g.id]
             for ptype in (str(p) for p in node):
                 if ptype not in _fields: continue
@@ -488,18 +488,7 @@
                 field_list = list(set(field_list).union(ap_fields))
         else:
             field_list = None
-        field_list = self.comm.mpi_bcast(field_list)
-        self.field_list = []
-        # Now we will iterate over all fields, trying to avoid the problem of
-        # particle types not having names.  This should convert all known
-        # particle fields that exist in Enzo outputs into the construction
-        # ("all", field) and should not otherwise affect ActiveParticle
-        # simulations.
-        for field in field_list:
-            if ("all", field) in KnownEnzoFields:
-                self.field_list.append(("all", field))
-            else:
-                self.field_list.append(field)
+        self.field_list = list(self.comm.mpi_bcast(field_list))
 
     def _generate_random_grids(self):
         if self.num_grids > 40:

diff -r e866d950539aee7c54f517248db397b557465512 -r 2f7946397ed53cdb823661cb14e44e96e5a1d80a yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -36,8 +36,18 @@
     _base = slice(None)
 
     def _read_field_names(self, grid):
-        return hdf5_light_reader.ReadListOfDatasets(
-                    grid.filename, "/Grid%08i" % grid.id)
+        f = h5py.File(grid.filename, "r")
+        group = f["/Grid%08i" % grid.id]
+        fields = []
+        for name, v in group.iteritems():
+            # NOTE: This won't work with 1D datasets.
+            if not hasattr(v, "shape"):
+                continue
+            elif len(v.shape) == 1:
+                fields.append( ("io", str(name)) )
+            else:
+                fields.append( ("gas", str(name)) )
+        return fields
 
     @property
     def _read_exception(self):
@@ -59,9 +69,10 @@
                         pds = ds["Particles/%s" % ptype]
                     else:
                         pds = ds
-                    pn = _particle_position_names.get(ptypes[0],
+                    pn = _particle_position_names.get(ptype,
                             r"particle_position_%s")
-                    x, y, z = (pds[pn % ax][:] for ax in 'xyz')
+                    x, y, z = (np.asarray(pds[pn % ax][:], dtype="=f8")
+                               for ax in 'xyz')
                     yield ptype, (x, y, z)
             f.close()
 
@@ -81,138 +92,19 @@
                         pds = ds["Particles/%s" % ptype]
                     else:
                         pds = ds
-                    pn = _particle_position_names.get(ptypes[0],
+                    pn = _particle_position_names.get(ptype,
                             r"particle_position_%s")
-                    x, y, z = (pds[pn % ax][:] for ax in 'xyz')
+                    x, y, z = (np.asarray(pds[pn % ax][:], dtype="=f8")
+                               for ax in 'xyz')
                     mask = selector.select_points(x, y, z)
                     if mask is None: continue
                     for field in field_list:
-                        data = pds[field]
+                        data = np.asarray(pds[field], "=f8")
                         if field in _convert_mass:
                             data *= g.dds.prod()
                         yield (ptype, field), data
             f.close()
 
-    def __read_particle_selection_by_type(self, chunks, selector, fields):
-        rv = {}
-        ptypes = list(set([ftype for ftype, fname in fields]))
-        fields = list(set(fields))
-        if len(ptypes) > 1: raise NotImplementedError
-        pn = _particle_position_names.get(ptypes[0], r"particle_position_%s")
-        pfields = [(ptypes[0], pn % ax) for ax in 'xyz']
-        size = 0
-        for chunk in chunks:
-            data = self._read_chunk_data(chunk, pfields, 'active', 
-                        "/Particles/%s" % ptypes[0])
-            for g in chunk.objs:
-                if g.NumberOfActiveParticles[ptypes[0]] == 0: continue
-                x, y, z = (data[g.id].pop(fn) for ft, fn in pfields)
-                size += g.count_particles(selector, x, y, z)
-        read_fields = fields[:]
-        for field in fields:
-            # TODO: figure out dataset types
-            rv[field] = np.empty(size, dtype='float64')
-        for pfield in pfields:
-            if pfield not in fields: read_fields.append(pfield)
-        ind = 0
-        for chunk in chunks:
-            data = self._read_chunk_data(chunk, read_fields, 'active',
-                        "/Particles/%s" % ptypes[0])
-            for g in chunk.objs:
-                if g.NumberOfActiveParticles[ptypes[0]] == 0: continue
-                x, y, z = (data[g.id][fn] for ft, fn in pfields)
-                mask = g.select_particles(selector, x, y, z)
-                if mask is None: continue
-                for field in set(fields):
-                    ftype, fname = field
-                    gdata = data[g.id].pop(fname)[mask]
-                    if fname in _convert_mass:
-                        gdata *= g.dds.prod()
-                    rv[field][ind:ind+gdata.size] = gdata
-                ind += gdata.size
-                data.pop(g.id)
-        return rv
-
-    def __read_particle_selection(self, chunks, selector, fields):
-        last = None
-        rv = {}
-        chunks = list(chunks)
-        # Now we have to do something unpleasant
-        if any((ftype != "all" for ftype, fname in fields)):
-            type_fields = [(ftype, fname) for ftype, fname in fields
-                           if ftype != "all"]
-            rv.update(self._read_particle_selection_by_type(
-                      chunks, selector, type_fields))
-            if len(rv) == len(fields): return rv
-            fields = [f for f in fields if f not in rv]
-        mylog.debug("First pass: counting particles.")
-        xn, yn, zn = ("particle_position_%s" % ax for ax in 'xyz')
-        size = 0
-        pfields = [("all", "particle_position_%s" % ax) for ax in 'xyz']
-        for chunk in chunks:
-            data = self._read_chunk_data(chunk, pfields, 'any')
-            for g in chunk.objs:
-                if g.NumberOfParticles == 0: continue
-                x, y, z = (
-                    np.asarray(data[g.id].pop("particle_position_%s" % ax),
-                               dtype="float64")
-                    for ax in 'xyz')
-                size += g.count_particles(selector, x, y, z)
-        read_fields = fields[:]
-        for field in fields:
-            # TODO: figure out dataset types
-            rv[field] = np.empty(size, dtype='float64')
-        for pfield in pfields:
-            if pfield not in fields: read_fields.append(pfield)
-        ng = sum(len(c.objs) for c in chunks)
-        mylog.debug("Reading %s cells of %s fields in %s grids",
-                   size, [f2 for f1, f2 in fields], ng)
-        ind = 0
-        for chunk in chunks:
-            data = self._read_chunk_data(chunk, read_fields, 'any')
-            for g in chunk.objs:
-                if g.NumberOfParticles == 0: continue
-                x, y, z = (data[g.id]["particle_position_%s" % ax]
-                           for ax in 'xyz')
-                x, y, z = (np.array(arr, dtype='float64') for arr in (x, y, z))
-                mask = g.select_particles(selector, x, y, z)
-                if mask is None: continue
-                for field in set(fields):
-                    ftype, fname = field
-                    gdata = data[g.id].pop(fname)[mask]
-                    if fname in _convert_mass:
-                        gdata *= g.dds.prod()
-                    rv[field][ind:ind+gdata.size] = gdata
-                ind += gdata.size
-        return rv
-        
-    def _read_fluid_selection(self, chunks, selector, fields, size):
-        rv = {}
-        # Now we have to do something unpleasant
-        chunks = list(chunks)
-        if selector.__class__.__name__ == "GridSelector":
-            return self._read_grid_chunk(chunks, fields)
-        if any((ftype != "gas" for ftype, fname in fields)):
-            raise NotImplementedError
-        for field in fields:
-            ftype, fname = field
-            fsize = size
-            rv[field] = np.empty(fsize, dtype="float64")
-        ng = sum(len(c.objs) for c in chunks)
-        mylog.debug("Reading %s cells of %s fields in %s grids",
-                   size, [f2 for f1, f2 in fields], ng)
-        ind = 0
-        for chunk in chunks:
-            data = self._read_chunk_data(chunk, fields)
-            for g in chunk.objs:
-                for field in fields:
-                    ftype, fname = field
-                    ds = data[g.id].pop(fname).swapaxes(0,2)
-                    nd = g.select(selector, ds, rv[field], ind) # caches
-                ind += nd
-                data.pop(g.id)
-        return rv
-
     def _read_grid_chunk(self, chunks, fields):
         sets = [fname for ftype, fname in fields]
         g = chunks[0].objs[0]
@@ -224,17 +116,11 @@
             rv[(ftype, fname)] = rv.pop(fname).swapaxes(0,2)
         return rv
 
-    def _read_chunk_data(self, chunk, fields, filter_particles = False,
-                         suffix = ""):
+    def _read_chunk_data(self, chunk, fields, suffix = ""):
         data = {}
         grids_by_file = defaultdict(list)
         for g in chunk.objs:
-            if filter_particles == 'any' and g.NumberOfParticles == 0:
-                continue
-            elif filter_particles == 'active' and \
-                 g.NumberOfActiveParticles[fields[0][0]] == 0:
-                continue
-            elif g.filename is None:
+            if g.filename is None:
                 continue
             grids_by_file[g.filename].append(g.id)
         #if len(chunk.objs) == 1 and len(grids_by_file) > 0:
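
The recurring np.asarray(..., dtype="=f8") in this changeset is worth isolating: "=f8" requests native-endian float64, copying big-endian HDF5 data if needed so the Cython selection routines see one layout. A minimal sketch, with `pds` a hypothetical h5py group:

    import numpy as np

    x = np.asarray(pds["particle_position_x"][:], dtype="=f8")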


https://bitbucket.org/yt_analysis/yt-3.0/commits/9eec4fd561d6/
Changeset:   9eec4fd561d6
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-08 19:44:40
Summary:     Cleaning up much more of the Enzo particle handling
Affected #:  5 files

diff -r 2f7946397ed53cdb823661cb14e44e96e5a1d80a -r 9eec4fd561d6136d204fdd0b728c8775e73a69f1 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -337,6 +337,9 @@
         self.h._setup_unknown_fields(fields)
         self.h._setup_particle_types([union.name])
 
+    def _setup_particle_type(self, ptype):
+        mylog.debug("Don't know what to do with %s", ptype)
+
     @property
     def particle_fields_by_type(self):
         fields = defaultdict(list)

diff -r 2f7946397ed53cdb823661cb14e44e96e5a1d80a -r 9eec4fd561d6136d204fdd0b728c8775e73a69f1 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -46,7 +46,7 @@
 from .fields import \
     EnzoFieldInfo, Enzo2DFieldInfo, Enzo1DFieldInfo, \
     add_enzo_field, add_enzo_2d_field, add_enzo_1d_field, \
-    KnownEnzoFields
+    KnownEnzoFields, _setup_particle_fields
 
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_blocking_call
@@ -991,6 +991,9 @@
             return True
         return os.path.exists("%s.hierarchy" % args[0])
 
+    def _setup_particle_type(self, ptype):
+        _setup_particle_fields(self.field_info, ptype)
+
 class EnzoStaticOutputInMemory(EnzoStaticOutput):
     _hierarchy_class = EnzoHierarchyInMemory
     _data_style = 'enzo_inline'

diff -r 2f7946397ed53cdb823661cb14e44e96e5a1d80a -r 9eec4fd561d6136d204fdd0b728c8775e73a69f1 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -480,47 +480,8 @@
 
 # Particle functions
 
-for pf in ["type", "mass"] + \
-          ["position_%s" % ax for ax in 'xyz']:
-    add_enzo_field(('all',"particle_%s" % pf), NullFunc, particle_type=True)
-
-def _convRetainInt(data):
-    return 1
-add_enzo_field(("all", "particle_index"), function=NullFunc,
-          particle_type=True, convert_function=_convRetainInt)
-
-def _get_vel_convert(ax):
-    def _convert_p_vel(data):
-        return data.convert("%s-velocity" % ax)
-    return _convert_p_vel
-for ax in 'xyz':
-    pf = "particle_velocity_%s" % ax
-    cfunc = _get_vel_convert(ax)
-    add_enzo_field(("all", pf), function=NullFunc, convert_function=cfunc,
-              particle_type=True)
-
-for pf in ["creation_time", "dynamical_time", "metallicity_fraction"] \
-        + ["particle_position_%s" % ax for ax in 'xyz'] \
-        + ["particle_velocity_%s" % ax for ax in 'xyz']:
-    add_enzo_field(pf, function=NullFunc,
-              validators = [ValidateDataField(pf)],
-              particle_type=True)
-
-def _convertParticleMass(data):
-    return data.convert("Density")*(data.convert("cm")**3.0)
-def _convertParticleMassMsun(data):
-    return data.convert("Density")*((data.convert("cm")**3.0)/mass_sun_cgs)
 # We have now multiplied by grid.dds.prod() inside the IO function.
 # So here we multiply just by the conversion to density.
-add_field(('all', "particle_mass"), function=NullFunc, 
-          particle_type=True, convert_function = _convertParticleMass)
-
-add_field("ParticleMass",
-          function=TranslationFunc("particle_mass"),
-          particle_type=True, convert_function=_convertParticleMass)
-add_field("ParticleMassMsun",
-          function=TranslationFunc("particle_mass"),
-          particle_type=True, convert_function=_convertParticleMassMsun)
 
 def _ParticleAge(field, data):
     current_time = data.pf.current_time
@@ -601,13 +562,32 @@
 add_enzo_1d_field("z-velocity", function=_zvel)
 add_enzo_1d_field("y-velocity", function=_yvel)
 
-for ax in 'xyz':
-    add_field(("CenOstriker","particle_position_%s" % ax),
-               function=TranslationFunc(("CenOstriker","position_%s" % ax)),
-               particle_type = True)
+def _get_vel_convert(ax):
+    def _convert_p_vel(data):
+        return data.convert("%s-velocity" % ax)
+    return _convert_p_vel
 
-particle_vector_functions("all", ["particle_position_%s" % ax for ax in 'xyz'],
-                                 ["particle_velocity_%s" % ax for ax in 'xyz'],
-                          EnzoFieldInfo)
-particle_deposition_functions("all", "Coordinates", "ParticleMass",
-                               EnzoFieldInfo)
+def _convertParticleMass(data):
+    return data.pf.conversion_factors["Density"] * \
+           data.pf.units["cm"]**3.0
+
+def _setup_particle_fields(registry, ptype):
+    particle_vector_functions(ptype, ["particle_position_%s" % ax for ax in 'xyz'],
+                                     ["particle_velocity_%s" % ax for ax in 'xyz'],
+                              registry)
+    particle_deposition_functions(ptype, "Coordinates", "particle_mass",
+                                   registry)
+
+    for ax in 'xyz':
+        fn = "particle_velocity_%s" % ax
+        cfunc = _get_vel_convert(ax)
+        registry.add_field((ptype, fn), function=NullFunc,
+                  convert_function=cfunc,
+                  particle_type=True)
+    for fn in ["creation_time", "dynamical_time", "metallicity_fraction"] + \
+              ["particle_position_%s" % ax for ax in 'xyz']:
+        registry.add_field((ptype, fn), function=NullFunc, particle_type=True)
+
+    registry.add_field((ptype, "particle_mass"), function=NullFunc, 
+              particle_type=True, convert_function = _convertParticleMass)
+

diff -r 2f7946397ed53cdb823661cb14e44e96e5a1d80a -r 9eec4fd561d6136d204fdd0b728c8775e73a69f1 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -59,7 +59,6 @@
             f = None
             for g in chunk.objs:
                 if f is None:
-                    mylog.debug("Opening (count) %s", g.filename)
                     f = h5py.File(g.filename, "r")
                 if g.NumberOfParticles == 0: continue
                 ds = f["/Grid%08i" % g.id]
@@ -82,7 +81,6 @@
             f = None
             for g in chunk.objs:
                 if f is None:
-                    mylog.debug("Opening (read) %s", g.filename)
                     f = h5py.File(g.filename, "r")
                 if g.NumberOfParticles == 0: continue
                 ds = f["/Grid%08i" % g.id]
@@ -101,7 +99,7 @@
                     for field in field_list:
                         data = np.asarray(pds[field], "=f8")
                         if field in _convert_mass:
-                            data *= g.dds.prod()
+                            data *= g.dds.prod(dtype="f8")
                         yield (ptype, field), data
             f.close()
 

diff -r 2f7946397ed53cdb823661cb14e44e96e5a1d80a -r 9eec4fd561d6136d204fdd0b728c8775e73a69f1 yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -141,8 +141,12 @@
         cname = self.pf._particle_coordinates_name
         vname = self.pf._particle_velocity_name
         # We require overriding if any of this is true
-        if None in (mname, cname, vname): return
         if ptypes is None: ptypes = self.pf.particle_types_raw
+        if None in (mname, cname, vname): 
+            # If we don't know what to do, then let's not.
+            for ptype in ptypes:
+                self.pf._setup_particle_type(ptype)
+            return
         fi = self.pf.field_info
         def _get_conv(cf):
             def _convert(data):
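
When the coordinate, mass, and velocity names are unknown, the handler now defers to the parameter file one type at a time. A hedged sketch of the resulting call pattern, with `pf` hypothetical:

    # Mirrors the fallback loop added to geometry_handler.py above.
    for ptype in pf.particle_types_raw:
        pf._setup_particle_type(ptype)  # Enzo routes this to _setup_particle_fields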


https://bitbucket.org/yt_analysis/yt-3.0/commits/12ef9421f452/
Changeset:   12ef9421f452
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-08 19:58:12
Summary:     Adding a memory checker routine.
Affected #:  1 file

diff -r 9eec4fd561d6136d204fdd0b728c8775e73a69f1 -r 12ef9421f4528c5510e0e93c05470b937bea8b3d yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -633,3 +633,22 @@
     for l in some_list[1:]:
         s.intersection_update(l)
     return s
+
+@contextlib.contextmanager
+def memory_checker(interval = 15):
+    import threading
+    class MemoryChecker(threading.Thread):
+        def __init__(self, event, interval):
+            self.event = event
+            self.interval = interval
+            threading.Thread.__init__(self)
+
+        def run(self):
+            while not self.event.wait(self.interval):
+                print "MEMORY: %0.3e gb" % (get_memory_usage()/1024.)
+
+    e = threading.Event()
+    mem_check = MemoryChecker(e, interval)
+    mem_check.start()
+    yield
+    e.set()
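
A usage sketch: a background thread prints resident memory at the chosen interval until the with-block exits and the event is set; the workload name is hypothetical.

    from yt.funcs import memory_checker

    with memory_checker(interval=5):
        answer = run_big_projection()  # hypothetical workload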


https://bitbucket.org/yt_analysis/yt-3.0/commits/343ee1bfa5d2/
Changeset:   343ee1bfa5d2
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-08 20:53:53
Summary:     A few h5py optimizations for Enzo.
Affected #:  3 files

diff -r 12ef9421f4528c5510e0e93c05470b937bea8b3d -r 343ee1bfa5d25e8fb3355b6818edcffe1eb6a978 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -43,7 +43,7 @@
             # NOTE: This won't work with 1D datasets.
             if not hasattr(v, "shape"):
                 continue
-            elif len(v.shape) == 1:
+            elif len(v.dims) == 1:
                 fields.append( ("io", str(name)) )
             else:
                 fields.append( ("gas", str(name)) )
@@ -59,18 +59,19 @@
             f = None
             for g in chunk.objs:
                 if f is None:
+                    #print "Opening (count) %s" % g.filename
                     f = h5py.File(g.filename, "r")
                 if g.NumberOfParticles == 0: continue
-                ds = f["/Grid%08i" % g.id]
+                ds = f.get("/Grid%08i" % g.id)
                 for ptype, field_list in sorted(ptf.items()):
                     if ptype != "io":
                         if g.NumberOfActiveParticles[ptype] == 0: continue
-                        pds = ds["Particles/%s" % ptype]
+                        pds = ds.get("Particles/%s" % ptype)
                     else:
                         pds = ds
                     pn = _particle_position_names.get(ptype,
                             r"particle_position_%s")
-                    x, y, z = (np.asarray(pds[pn % ax][:], dtype="=f8")
+                    x, y, z = (np.asarray(pds.get(pn % ax).value, dtype="=f8")
                                for ax in 'xyz')
                     yield ptype, (x, y, z)
             f.close()
@@ -81,23 +82,24 @@
             f = None
             for g in chunk.objs:
                 if f is None:
+                    #print "Opening (read) %s" % g.filename
                     f = h5py.File(g.filename, "r")
                 if g.NumberOfParticles == 0: continue
-                ds = f["/Grid%08i" % g.id]
+                ds = f.get("/Grid%08i" % g.id)
                 for ptype, field_list in sorted(ptf.items()):
                     if ptype != "io":
                         if g.NumberOfActiveParticles[ptype] == 0: continue
-                        pds = ds["Particles/%s" % ptype]
+                        pds = ds.get("Particles/%s" % ptype)
                     else:
                         pds = ds
                     pn = _particle_position_names.get(ptype,
                             r"particle_position_%s")
-                    x, y, z = (np.asarray(pds[pn % ax][:], dtype="=f8")
+                    x, y, z = (np.asarray(pds.get(pn % ax).value, dtype="=f8")
                                for ax in 'xyz')
                     mask = selector.select_points(x, y, z)
                     if mask is None: continue
                     for field in field_list:
-                        data = np.asarray(pds[field], "=f8")
+                        data = np.asarray(pds.get(field).value, "=f8")
                         if field in _convert_mass:
                             data *= g.dds.prod(dtype="f8")
                         yield (ptype, field), data

diff -r 12ef9421f4528c5510e0e93c05470b937bea8b3d -r 343ee1bfa5d25e8fb3355b6818edcffe1eb6a978 yt/geometry/grid_geometry_handler.py
--- a/yt/geometry/grid_geometry_handler.py
+++ b/yt/geometry/grid_geometry_handler.py
@@ -233,10 +233,6 @@
             dobj._chunk_info = np.empty(len(grids), dtype='object')
             for i, g in enumerate(grids):
                 dobj._chunk_info[i] = g
-        if getattr(dobj, "size", None) is None:
-            dobj.size = self._count_selection(dobj)
-        if getattr(dobj, "shape", None) is None:
-            dobj.shape = (dobj.size,)
         dobj._current_chunk = list(self._chunk_all(dobj, cache = False))[0]
 
     def _count_selection(self, dobj, grids = None):
@@ -246,7 +242,7 @@
 
     def _chunk_all(self, dobj, cache = True):
         gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
-        yield YTDataChunk(dobj, "all", gobjs, dobj.size, cache)
+        yield YTDataChunk(dobj, "all", gobjs, None, cache)
         
     def _chunk_spatial(self, dobj, ngz, sort = None, preload_fields = None):
         gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
@@ -276,6 +272,5 @@
             gfiles[g.filename].append(g)
         for fn in sorted(gfiles):
             gs = gfiles[fn]
-            yield YTDataChunk(dobj, "io", gs, self._count_selection(dobj, gs),
-                              cache = cache)
+            yield YTDataChunk(dobj, "io", gs, None, cache = cache)
 

diff -r 12ef9421f4528c5510e0e93c05470b937bea8b3d -r 343ee1bfa5d25e8fb3355b6818edcffe1eb6a978 yt/utilities/io_handler.py
--- a/yt/utilities/io_handler.py
+++ b/yt/utilities/io_handler.py
@@ -41,6 +41,8 @@
     def __init__(self, pf):
         self.queue = defaultdict(dict)
         self.pf = pf
+        self._last_selector_id = None
+        self._last_selector_counts = None
 
     # We need a function for reading a list of sets
     # and a function for *popping* from a queue all the appropriate sets
@@ -136,11 +138,17 @@
             else:
                 ptf[ftype].append(fname)
                 field_maps[field].append(field)
-        # Now we have our full listing.
-        # Here, ptype_map means which particles contribute to a given type.
-        # And ptf is the actual fields from disk to read.
-        for ptype, (x, y, z) in self._read_particle_coords(chunks, ptf):
-            psize[ptype] += selector.count_points(x, y, z)
+        if hash(selector) == self._last_selector_id and \
+           all(ptype in self._last_selector_counts for ptype in ptf):
+            psize.update(self._last_selector_counts)
+        else:
+            # Now we have our full listing.
+            # Here, ptype_map means which particles contribute to a given type.
+            # And ptf is the actual fields from disk to read.
+            for ptype, (x, y, z) in self._read_particle_coords(chunks, ptf):
+                psize[ptype] += selector.count_points(x, y, z)
+            self._last_selector_counts = dict(**psize)
+            self._last_selector_id = hash(selector)
         # Now we allocate
         # ptf, remember, is our mapping of what we want to read
         #for ptype in ptf:
@@ -162,8 +170,8 @@
             # Note that we now need to check the mappings
             for field_f in field_maps[field_r]:
                 my_ind = ind[field_f]
-                mylog.debug("Filling %s from %s to %s with %s",
-                    field_f, my_ind, my_ind+vals.shape[0], field_r)
+                #mylog.debug("Filling %s from %s to %s with %s",
+                #    field_f, my_ind, my_ind+vals.shape[0], field_r)
                 rv[field_f][my_ind:my_ind + vals.shape[0],...] = vals
                 ind[field_f] += vals.shape[0]
         return rv


https://bitbucket.org/yt_analysis/yt-3.0/commits/e3a96d14bd46/
Changeset:   e3a96d14bd46
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-08 20:55:34
Summary:     Disable selector hashing, but leave the stubs.
Affected #:  1 file

diff -r 343ee1bfa5d25e8fb3355b6818edcffe1eb6a978 -r e3a96d14bd4680b6728a12ab98c4bfb25b1d3309 yt/utilities/io_handler.py
--- a/yt/utilities/io_handler.py
+++ b/yt/utilities/io_handler.py
@@ -138,7 +138,8 @@
             else:
                 ptf[ftype].append(fname)
                 field_maps[field].append(field)
-        if hash(selector) == self._last_selector_id and \
+        # We can't hash chunks, but otherwise this is a neat idea.
+        if 0 and hash(selector) == self._last_selector_id and \
            all(ptype in self._last_selector_counts for ptype in ptf):
             psize.update(self._last_selector_counts)
         else:
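
The counting cache is keyed on the selector alone, but the counts also depend on which chunks were handed in, which is what the comment above concedes. A hedged sketch of a composite key that would make the cache safe, assuming grids carry stable ids:

    # Hypothetical cache key covering both the selector and the chunk set.
    key = (hash(selector),
           tuple(g.id for chunk in chunks for g in chunk.objs))
    if key == self._last_selector_id:
        psize.update(self._last_selector_counts)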


https://bitbucket.org/yt_analysis/yt-3.0/commits/a4938bb6adc4/
Changeset:   a4938bb6adc4
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-08 21:25:21
Summary:     Somehow we never ran into this problem before.

But, we need to use _get_field_info for the field during IO.
Affected #:  1 file

diff -r e3a96d14bd4680b6728a12ab98c4bfb25b1d3309 -r a4938bb6adc4fbe86cf1dc20dfe50ce43b31b58c yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -519,7 +519,8 @@
             chunk_size)
         for field in fields_to_read:
             ftype, fname = field
-            conv_factor = self.pf.field_info[fname]._convert_function(self)
+            finfo = self.pf._get_field_info(*field)
+            conv_factor = finfo._convert_function(self)
             np.multiply(fields_to_return[field], conv_factor,
                         fields_to_return[field])
         #mylog.debug("Don't know how to read %s", fields_to_generate)


https://bitbucket.org/yt_analysis/yt-3.0/commits/2895661ea6de/
Changeset:   2895661ea6de
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-08 22:00:35
Summary:     Port fluid fields to h5py from hdf5_light_reader.  Fix fields in Enzo.

Note that this results in a performance hit for yt-3.0.  But, for yt-2.x, it's
still a net performance gain.

Also note that now because Enzo returns correctly named "gas" fields, we have
to change a few things about how it accesses and defines fields.
Affected #:  3 files

diff -r a4938bb6adc4fbe86cf1dc20dfe50ce43b31b58c -r 2895661ea6de31873810d9d3e97206e1d57b99c0 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -82,26 +82,21 @@
 for species in _speciesList:
     add_field("%s_Fraction" % species,
              function=_SpeciesFraction,
-             validators=ValidateDataField("%s_Density" % species),
              display_name="%s\/Fraction" % species)
     add_field("Comoving_%s_Density" % species,
              function=_SpeciesComovingDensity,
-             validators=ValidateDataField("%s_Density" % species),
              display_name="Comoving\/%s\/Density" % species)
     add_field("%s_Mass" % species, units=r"\rm{g}", 
               function=_SpeciesMass, 
-              validators=ValidateDataField("%s_Density" % species),
               display_name="%s\/Mass" % species)
     add_field("%s_MassMsun" % species, units=r"M_{\odot}", 
               function=_SpeciesMass, 
               convert_function=_convertCellMassMsun,
-              validators=ValidateDataField("%s_Density" % species),
               display_name="%s\/Mass" % species)
     if _speciesMass.has_key(species):
         add_field("%s_NumberDensity" % species,
                   function=_SpeciesNumberDensity,
-                  convert_function=_ConvertNumberDensity,
-                  validators=ValidateDataField("%s_Density" % species))
+                  convert_function=_ConvertNumberDensity)
 
 def _Metallicity(field, data):
     return data["Metal_Fraction"]
@@ -110,7 +105,6 @@
 add_field("Metallicity", units=r"Z_{\rm{\odot}}",
           function=_Metallicity,
           convert_function=_ConvertMetallicity,
-          validators=ValidateDataField("Metal_Density"),
           projection_conversion="1")
 
 def _Metallicity3(field, data):
@@ -118,12 +112,10 @@
 add_field("Metallicity3", units=r"Z_{\rm{\odot}}",
           function=_Metallicity3,
           convert_function=_ConvertMetallicity,
-          validators=ValidateDataField("SN_Colour"),
           projection_conversion="1")
 
 add_enzo_field("Cooling_Time", units=r"\rm{s}",
                function=NullFunc,
-               validators=ValidateDataField("Cooling_Time"),
                projection_conversion="1")
 
 def _ThermalEnergy(field, data):
@@ -269,8 +261,7 @@
 for field in _default_fields:
     dn = field.replace("_","\/")
     add_enzo_field(field, function=NullFunc, take_log=True,
-              display_name = dn,
-              validators=[ValidateDataField(field)], units=r"Unknown")
+              display_name = dn, units=r"Unknown")
 KnownEnzoFields["x-velocity"].projection_conversion='1'
 KnownEnzoFields["y-velocity"].projection_conversion='1'
 KnownEnzoFields["z-velocity"].projection_conversion='1'
@@ -307,7 +298,6 @@
              data["RadAccel3"]**2 )**(1.0/2.0)
 add_field("RadiationAcceleration", 
           function=_RadiationAccelerationMagnitude,
-          validators=ValidateDataField(["RadAccel1", "RadAccel2", "RadAccel3"]),
           display_name="Radiation\/Acceleration", units=r"\rm{cm} \rm{s}^{-2}")
 
 # Now we override
@@ -322,19 +312,16 @@
 
 add_enzo_field("Dark_Matter_Density", function=NullFunc,
           convert_function=_convertDensity,
-          validators=[ValidateDataField("Dark_Matter_Density"),
-                      ValidateSpatial(0)],
+          validators=[ValidateSpatial(0)],
           display_name = "Dark\/Matter\/Density",
           not_in_all = True)
 
 def _Dark_Matter_Mass(field, data):
     return data['Dark_Matter_Density'] * data["CellVolume"]
 add_field("Dark_Matter_Mass", function=_Dark_Matter_Mass,
-          validators=ValidateDataField("Dark_Matter_Density"),
           display_name="Dark\/Matter\/Mass", units=r"\rm{g}")
 add_field("Dark_Matter_MassMsun", function=_Dark_Matter_Mass,
           convert_function=_convertCellMassMsun,
-          validators=ValidateDataField("Dark_Matter_Density"),
           display_name="Dark\/Matter\/Mass", units=r"M_{\odot}")
 
 KnownEnzoFields["Temperature"]._units = r"\rm{K}"
@@ -489,7 +476,6 @@
 def _convertParticleAge(data):
     return data.convert("years")
 add_field("ParticleAge", function=_ParticleAge,
-          validators=[ValidateDataField("creation_time")],
           particle_type=True, convert_function=_convertParticleAge)
 
 

diff -r a4938bb6adc4fbe86cf1dc20dfe50ce43b31b58c -r 2895661ea6de31873810d9d3e97206e1d57b99c0 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -133,6 +133,49 @@
                 filename, nodes, sets, suffix))
         return data
 
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        rv = {}
+        # Now we have to do something unpleasant
+        chunks = list(chunks)
+        if selector.__class__.__name__ == "GridSelector":
+            if not (len(chunks) == len(chunks[0].objs) == 1):
+                raise RuntimeError
+            g = chunks[0].objs[0]
+            f = h5py.File(g.filename, "r")
+            gds = f.get("/Grid%08i" % g.id)
+            for ftype, fname in fields:
+                rv[(ftype, fname)] = gds.get(fname).value.swapaxes(0,2)
+            f.close()
+            return rv
+        if size is None:
+            size = sum((g.count(selector) for chunk in chunks
+                        for g in chunk.objs))
+        if any((ftype != "gas" for ftype, fname in fields)):
+            raise NotImplementedError
+        for field in fields:
+            ftype, fname = field
+            fsize = size
+            rv[field] = np.empty(fsize, dtype="float64")
+        ng = sum(len(c.objs) for c in chunks)
+        mylog.debug("Reading %s cells of %s fields in %s grids",
+                   size, [f2 for f1, f2 in fields], ng)
+        ind = 0
+        for chunk in chunks:
+            f = None
+            for g in chunk.objs:
+                if f is None:
+                    #print "Opening (count) %s" % g.filename
+                    f = h5py.File(g.filename, "r")
+                gds = f.get("/Grid%08i" % g.id)
+                for field in fields:
+                    ftype, fname = field
+                    ds = gds.get(fname).value.swapaxes(0,2)
+                    nd = g.select(selector, ds, rv[field], ind) # caches
+                ind += nd
+            f.close()
+        return rv
+
+
 class IOHandlerPackedHDF5GhostZones(IOHandlerPackedHDF5):
     _data_style = "enzo_packed_3d_gz"
 
@@ -220,7 +263,18 @@
         # Now we have to do something unpleasant
         chunks = list(chunks)
         if selector.__class__.__name__ == "GridSelector":
-            return self._read_grid_chunk(chunks, fields)
+            if not (len(chunks) == len(chunks[0].objs) == 1):
+                raise RuntimeError
+            g = chunks[0].objs[0]
+            f = h5py.File(g.filename)
+            gds = f.get("/Grid%08i" % g.id)
+            for ftype, fname in fields:
+                rv[(ftype, fname)] = np.atleast_3d(gds.get(fname).value)
+            f.close()
+            return rv
+        if size is None:
+            size = sum((g.count(selector) for chunk in chunks
+                        for g in chunk.objs))
         if any((ftype != "gas" for ftype, fname in fields)):
             raise NotImplementedError
         for field in fields:
@@ -232,17 +286,20 @@
                    size, [f2 for f1, f2 in fields], ng)
         ind = 0
         for chunk in chunks:
-            data = self._read_chunk_data(chunk, fields)
+            f = None
             for g in chunk.objs:
+                if f is None:
+                    #print "Opening (count) %s" % g.filename
+                    f = h5py.File(g.filename, "r")
+                gds = f.get("/Grid%08i" % g.id)
                 for field in fields:
                     ftype, fname = field
-                    ds = np.atleast_3d(data[g.id].pop(fname))
-                    nd = g.select(selector, ds, rv[field], ind)  # caches
+                    ds = np.atleast_3d(gds.get(fname).value)
+                    nd = g.select(selector, ds, rv[field], ind) # caches
                 ind += nd
-                data.pop(g.id)
+            f.close()
         return rv
 
-
 class IOHandlerPacked1D(IOHandlerPackedHDF5):
 
     _data_style = "enzo_packed_1d"

diff -r a4938bb6adc4fbe86cf1dc20dfe50ce43b31b58c -r 2895661ea6de31873810d9d3e97206e1d57b99c0 yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -171,6 +171,7 @@
     def _setup_unknown_fields(self, list_of_fields = None):
         known_fields = self.parameter_file._fieldinfo_known
         field_list = list_of_fields or self.field_list
+        dftype = self.parameter_file.default_fluid_type
         mylog.debug("Checking %s", field_list)
         for field in field_list:
             # By allowing a backup, we don't mandate that it's found in our
@@ -178,11 +179,17 @@
             # it.
             ff = self.parameter_file.field_info.pop(field, None)
             if field not in known_fields:
+                # Now we check if it's a gas field or what ...
                 if isinstance(field, types.TupleType) and \
                    field[0] in self.parameter_file.particle_types:
                     particle_type = True
                 else:
                     particle_type = False
+                if not particle_type and field[1] in known_fields:
+                    print "Adding known field %s to list of fields", field
+                    mylog.debug("Adding known field %s to list of fields", field)
+                    self.pf.field_info[field] = known_fields[field[1]]
+                    continue
                 rootloginfo("Adding unknown field %s to list of fields", field)
                 cf = None
                 if self.parameter_file.has_key(field):


https://bitbucket.org/yt_analysis/yt-3.0/commits/243f5c859730/
Changeset:   243f5c859730
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-08 22:12:22
Summary:     Minor speed boost for Enzo IO.
Affected #:  1 file

diff -r 2895661ea6de31873810d9d3e97206e1d57b99c0 -r 243f5c859730b9525e9597571e07dd4f9fed9a40 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -150,8 +150,6 @@
         if size is None:
             size = sum((g.count(selector) for chunk in chunks
                         for g in chunk.objs))
-        if any((ftype != "gas" for ftype, fname in fields)):
-            raise NotImplementedError
         for field in fields:
             ftype, fname = field
             fsize = size
@@ -166,10 +164,9 @@
                 if f is None:
                     #print "Opening (count) %s" % g.filename
                     f = h5py.File(g.filename, "r")
-                gds = f.get("/Grid%08i" % g.id)
                 for field in fields:
                     ftype, fname = field
-                    ds = gds.get(fname).value.swapaxes(0,2)
+                    ds = f.get("/Grid%08i/%s" % (g.id, fname)).value.swapaxes(0,2)
                     nd = g.select(selector, ds, rv[field], ind) # caches
                 ind += nd
             f.close()
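
The speed boost is one fewer HDF5 object lookup per grid: each dataset is
addressed by its full path rather than through an intermediate group object.
A toy comparison, with an invented file layout:

    import h5py
    import numpy as np

    with h5py.File("bench.h5", "w") as f:
        for gid in range(8):
            f.create_dataset("/Grid%08i/Density" % gid,
                             data=np.ones((4, 4, 4)))

    f = h5py.File("bench.h5", "r")
    for gid in range(8):
        ds_old = f.get("/Grid%08i" % gid).get("Density").value  # two lookups
        ds_new = f.get("/Grid%08i/Density" % gid).value         # one lookup
        assert (ds_old == ds_new).all()
    f.close()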


https://bitbucket.org/yt_analysis/yt-3.0/commits/9d210a2f2d27/
Changeset:   9d210a2f2d27
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-08 23:06:24
Summary:     Track derived fields from particles around a bit.
Affected #:  4 files

diff -r 243f5c859730b9525e9597571e07dd4f9fed9a40 -r 9d210a2f2d27d6350010812daaebdce023a91718 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -339,6 +339,7 @@
 
     def _setup_particle_type(self, ptype):
         mylog.debug("Don't know what to do with %s", ptype)
+        return []
 
     @property
     def particle_fields_by_type(self):

diff -r 243f5c859730b9525e9597571e07dd4f9fed9a40 -r 9d210a2f2d27d6350010812daaebdce023a91718 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -992,7 +992,8 @@
         return os.path.exists("%s.hierarchy" % args[0])
 
     def _setup_particle_type(self, ptype):
-        _setup_particle_fields(self.field_info, ptype)
+        df = _setup_particle_fields(self.field_info, ptype)
+        return df
 
 class EnzoStaticOutputInMemory(EnzoStaticOutput):
     _hierarchy_class = EnzoHierarchyInMemory

diff -r 243f5c859730b9525e9597571e07dd4f9fed9a40 -r 9d210a2f2d27d6350010812daaebdce023a91718 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -558,22 +558,28 @@
            data.pf.units["cm"]**3.0
 
 def _setup_particle_fields(registry, ptype):
-    particle_vector_functions(ptype, ["particle_position_%s" % ax for ax in 'xyz'],
-                                     ["particle_velocity_%s" % ax for ax in 'xyz'],
-                              registry)
-    particle_deposition_functions(ptype, "Coordinates", "particle_mass",
-                                   registry)
+    df = []
+    df += particle_vector_functions(ptype,
+            ["particle_position_%s" % ax for ax in 'xyz'],
+            ["particle_velocity_%s" % ax for ax in 'xyz'],
+            registry)
+    df += particle_deposition_functions(ptype,
+            "Coordinates", "particle_mass",
+            registry)
 
     for ax in 'xyz':
         fn = "particle_velocity_%s" % ax
         cfunc = _get_vel_convert(ax)
+        df.append((ptype, fn))
         registry.add_field((ptype, fn), function=NullFunc,
                   convert_function=_get_vel_convert(ax),
                   particle_type=True)
     for fn in ["creation_time", "dynamical_time", "metallicity_fraction"] + \
               ["particle_position_%s" % ax for ax in 'xyz']:
+        df.append((ptype, fn))
         registry.add_field((ptype, fn), function=NullFunc, particle_type=True)
 
     registry.add_field((ptype, "particle_mass"), function=NullFunc, 
               particle_type=True, convert_function = _convertParticleMass)
-
+    df.append((ptype, "particle_mass"))
+    return df

diff -r 243f5c859730b9525e9597571e07dd4f9fed9a40 -r 9d210a2f2d27d6350010812daaebdce023a91718 yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -77,12 +77,12 @@
         mylog.debug("Adding unknown detected fields")
         self._setup_unknown_fields()
 
+        mylog.debug("Setting up derived fields")
+        self._setup_derived_fields()
+
         mylog.debug("Setting up particle fields")
         self._setup_particle_types()
 
-        mylog.debug("Setting up derived fields")
-        self._setup_derived_fields()
-
     def __del__(self):
         if self._data_file is not None:
             self._data_file.close()
@@ -141,12 +141,15 @@
         cname = self.pf._particle_coordinates_name
         vname = self.pf._particle_velocity_name
         # We require overriding if any of this is true
+        df = []
         if ptypes is None: ptypes = self.pf.particle_types_raw
         if None in (mname, cname, vname): 
             # If we don't know what to do, then let's not.
             for ptype in ptypes:
-                self.pf._setup_particle_type(ptype)
-            return
+                df += self.pf._setup_particle_type(ptype)
+            # Now we have a bunch of new fields to add!
+            # This is where the dependencies get calculated.
+            self._derived_fields_add(df)
         fi = self.pf.field_info
         def _get_conv(cf):
             def _convert(data):
@@ -157,16 +160,20 @@
                 particle_type = True,
                 convert_function=_get_conv("velocity"),
                 units = r"\mathrm{cm}/\mathrm{s}")
+            df.append((ptype, vname))
             fi.add_field((ptype, mname), function=NullFunc,
                 particle_type = True,
                 convert_function=_get_conv("mass"),
                 units = r"\mathrm{g}")
-            particle_deposition_functions(ptype, cname, mname, fi)
-            particle_scalar_functions(ptype, cname, vname, fi)
+            df.append((ptype, mname))
+            df += particle_deposition_functions(ptype, cname, mname, fi)
+            df += particle_scalar_functions(ptype, cname, vname, fi)
             fi.add_field((ptype, cname), function=NullFunc,
                          particle_type = True)
+            df.append((ptype, cname))
             # Now we add some translations.
-            self.pf._setup_particle_type(ptype)
+            df += self.pf._setup_particle_type(ptype)
+        self._derived_fields_add(df)
 
     def _setup_unknown_fields(self, list_of_fields = None):
         known_fields = self.parameter_file._fieldinfo_known
@@ -186,7 +193,6 @@
                 else:
                     particle_type = False
                 if not particle_type and field[1] in known_fields:
-                    print "Adding known field %s to list of fields", field
                     mylog.debug("Adding known field %s to list of fields", field)
                     self.pf.field_info[field] = known_fields[field[1]]
                     continue
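
The thread running through this changeset: each _setup_particle_type-style
hook now returns the field keys it registered, the caller accumulates them,
and _derived_fields_add computes dependencies once over the combined list.
Schematically, with a stand-in registry rather than yt's field machinery:

    def setup_particle_type(registry, ptype):
        df = []
        for fname in ("particle_mass", "creation_time"):
            registry[(ptype, fname)] = None  # register a placeholder field
            df.append((ptype, fname))
        return df

    registry = {}
    df = []
    for ptype in ("io", "stars"):
        df += setup_particle_type(registry, ptype)
    # dependencies would now be calculated over df in a single call:
    print sorted(df)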


https://bitbucket.org/yt_analysis/yt-3.0/commits/f46a3691db0b/
Changeset:   f46a3691db0b
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-08 23:09:14
Summary:     Turns out these weren't big improvements, and they broke lots of tests.
Affected #:  1 file

diff -r 9d210a2f2d27d6350010812daaebdce023a91718 -r f46a3691db0b51455294d3b8acdf04c6f26449cb yt/geometry/grid_geometry_handler.py
--- a/yt/geometry/grid_geometry_handler.py
+++ b/yt/geometry/grid_geometry_handler.py
@@ -233,6 +233,10 @@
             dobj._chunk_info = np.empty(len(grids), dtype='object')
             for i, g in enumerate(grids):
                 dobj._chunk_info[i] = g
+        if getattr(dobj, "size", None) is None:
+            dobj.size = self._count_selection(dobj)
+        if getattr(dobj, "shape", None) is None:
+            dobj.shape = (dobj.size,)
         dobj._current_chunk = list(self._chunk_all(dobj, cache = False))[0]
 
     def _count_selection(self, dobj, grids = None):
@@ -242,7 +246,7 @@
 
     def _chunk_all(self, dobj, cache = True):
         gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
-        yield YTDataChunk(dobj, "all", gobjs, None, cache)
+        yield YTDataChunk(dobj, "all", gobjs, dobj.size, cache)
         
     def _chunk_spatial(self, dobj, ngz, sort = None, preload_fields = None):
         gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
@@ -272,5 +276,6 @@
             gfiles[g.filename].append(g)
         for fn in sorted(gfiles):
             gs = gfiles[fn]
-            yield YTDataChunk(dobj, "io", gs, None, cache = cache)
+            yield YTDataChunk(dobj, "io", gs, self._count_selection(dobj, gs),
+                              cache = cache)
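
The restored lines follow a lazy-attribute pattern: compute the selection
count once, cache it on the data object, and hand the exact size to each
YTDataChunk instead of None. In isolation, with a stand-in object:

    class DummyObj(object):
        pass

    def ensure_size(dobj, count_selection):
        if getattr(dobj, "size", None) is None:
            dobj.size = count_selection(dobj)
        if getattr(dobj, "shape", None) is None:
            dobj.shape = (dobj.size,)

    dobj = DummyObj()
    ensure_size(dobj, lambda d: 42)
    print dobj.size, dobj.shape  # 42 (42,)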
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/aea239f8bd73/
Changeset:   aea239f8bd73
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-08 23:11:53
Summary:     Forgot about a few one-char fields.
Affected #:  1 file

diff -r f46a3691db0b51455294d3b8acdf04c6f26449cb -r aea239f8bd7385cd518c4b8dee3a9ab4697cec25 yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -187,12 +187,12 @@
             ff = self.parameter_file.field_info.pop(field, None)
             if field not in known_fields:
                 # Now we check if it's a gas field or what ...
-                if isinstance(field, types.TupleType) and \
-                   field[0] in self.parameter_file.particle_types:
+                if isinstance(field, tuple) and field[0] in self.pf.particle_types:
                     particle_type = True
                 else:
                     particle_type = False
-                if not particle_type and field[1] in known_fields:
+                if isinstance(field, tuple) and not particle_type and \
+                   field[1] in known_fields:
                     mylog.debug("Adding known field %s to list of fields", field)
                     self.pf.field_info[field] = known_fields[field[1]]
                     continue
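
The one-char fields in question are plain string field names: without the
isinstance(field, tuple) guard, field[1] compares the second *character* of
the name against known fields, and a one-character name raises IndexError.
A minimal demonstration:

    known_fields = {"y": "<definition>"}
    for field in [("gas", "y"), "xy", "x"]:
        if isinstance(field, tuple) and field[1] in known_fields:
            print "known tuple field:", field
        elif isinstance(field, str):
            # "xy"[1] == "y" would falsely match known_fields above,
            # and "x"[1] raises IndexError -- the trap being avoided.
            print "string field, no element check:", field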


https://bitbucket.org/yt_analysis/yt-3.0/commits/a1139ee6ce0c/
Changeset:   a1139ee6ce0c
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-08 23:49:18
Summary:     Fixing exception formatting.
Affected #:  1 file

diff -r aea239f8bd7385cd518c4b8dee3a9ab4697cec25 -r a1139ee6ce0c3c10a5ea0e689a4ffd83bd40be81 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -241,7 +241,7 @@
         self.field = field
 
     def __str__(self):
-        return "Cannot identify field %s" % self.field
+        return "Cannot identify field %s" % (self.field,)
 
 class YTDataSelectorNotImplemented(YTException):
     def __init__(self, class_name):
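
A one-character fix with a real payoff: %-formatting treats a bare tuple as
its argument list, so a tuple-valued field name must be wrapped in a
one-element tuple:

    field = ("gas", "Density")
    # "Cannot identify field %s" % field
    #   -> TypeError: not all arguments converted during string formatting
    print "Cannot identify field %s" % (field,)
    #   -> Cannot identify field ('gas', 'Density')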


https://bitbucket.org/yt_analysis/yt-3.0/commits/fa2e4c694b53/
Changeset:   fa2e4c694b53
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-09 00:19:54
Summary:     Removing hdf5_light_reader and switching to low-level h5py interface.
Affected #:  2 files

diff -r a1139ee6ce0c3c10a5ea0e689a4ffd83bd40be81 -r fa2e4c694b533ede2a1dc65fcc616f118a93f04e yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -38,7 +38,6 @@
     FieldInfoContainer, NullFunc
 from yt.utilities.definitions import \
     mpc_conversion, sec_conversion
-from yt.utilities import hdf5_light_reader
 from yt.utilities.io_handler import io_registry
 from yt.utilities.logger import ytLogger as mylog
 
@@ -259,6 +258,7 @@
             mylog.debug("Detected HDF4")
         except:
             try:
+                # This will always fail
                 list_of_sets = hdf5_light_reader.ReadListOfDatasets(test_grid, "/")
             except:
                 print "Could not find dataset.  Defaulting to packed HDF5"

diff -r a1139ee6ce0c3c10a5ea0e689a4ffd83bd40be81 -r fa2e4c694b533ede2a1dc65fcc616f118a93f04e yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -16,7 +16,6 @@
 import exceptions
 import os
 
-from yt.utilities import hdf5_light_reader
 from yt.utilities.io_handler import \
     BaseIOHandler, _axis_ids
 from yt.utilities.logger import ytLogger as mylog
@@ -47,11 +46,12 @@
                 fields.append( ("io", str(name)) )
             else:
                 fields.append( ("gas", str(name)) )
+        f.close()
         return fields
 
     @property
     def _read_exception(self):
-        return (exceptions.KeyError, hdf5_light_reader.ReadingError)
+        return (exceptions.KeyError,)
 
     def _read_particle_coords(self, chunks, ptf):
         chunks = list(chunks)
@@ -159,17 +159,18 @@
                    size, [f2 for f1, f2 in fields], ng)
         ind = 0
         for chunk in chunks:
-            f = None
+            fid = None
             for g in chunk.objs:
-                if f is None:
-                    #print "Opening (count) %s" % g.filename
-                    f = h5py.File(g.filename, "r")
+                if fid is None:
+                    fid = h5py.h5f.open(g.filename, h5py.h5f.ACC_RDONLY)
+                data = np.empty(g.ActiveDimensions[::-1], dtype="float64")
+                data_view = data.swapaxes(0,2)
                 for field in fields:
                     ftype, fname = field
-                    ds = f.get("/Grid%08i/%s" % (g.id, fname)).value.swapaxes(0,2)
-                    nd = g.select(selector, ds, rv[field], ind) # caches
+                    dg = h5py.h5d.open(fid, "/Grid%08i/Density" % g.id)
+                    dg.read(h5py.h5s.ALL, h5py.h5s.ALL, data)
+                    nd = g.select(selector, data_view, rv[field], ind) # caches
                 ind += nd
-            f.close()
         return rv
 
 
@@ -184,6 +185,7 @@
                       slice(NGZ, -NGZ))
 
     def _read_raw_data_set(self, grid, field):
+        raise NotImplementedError
         return hdf5_light_reader.ReadData(grid.filename,
                 "/Grid%08i/%s" % (grid.id, field))
 
@@ -244,6 +246,7 @@
     _particle_reader = False
 
     def _read_data_set(self, grid, field):
+        raise NotImplementedError
         return hdf5_light_reader.ReadData(grid.filename,
             "/Grid%08i/%s" % (grid.id, field)).transpose()[:,:,None]
 
@@ -251,6 +254,7 @@
         pass
 
     def _read_data_slice(self, grid, field, axis, coord):
+        raise NotImplementedError
         t = hdf5_light_reader.ReadData(grid.filename, "/Grid%08i/%s" %
                         (grid.id, field)).transpose()
         return t
@@ -303,6 +307,7 @@
     _particle_reader = False
 
     def _read_data_set(self, grid, field):
+        raise NotImplementedError
         return hdf5_light_reader.ReadData(grid.filename,
             "/Grid%08i/%s" % (grid.id, field)).transpose()[:,None,None]
 
@@ -310,6 +315,7 @@
         pass
 
     def _read_data_slice(self, grid, field, axis, coord):
+        raise NotImplementedError
         t = hdf5_light_reader.ReadData(grid.filename, "/Grid%08i/%s" %
                         (grid.id, field))
         return t
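
The low-level h5py pattern adopted here, reduced to its essentials: open
file and dataset identifiers directly and read into a single preallocated
buffer that is reused from grid to grid (the filename, grid id, and
dimensions below are illustrative):

    import h5py
    import numpy as np

    fid = h5py.h5f.open("DD0000.cpu0000", h5py.h5f.ACC_RDONLY)
    data = np.empty((16, 16, 16), dtype="float64")  # ActiveDimensions[::-1]
    data_view = data.swapaxes(0, 2)                 # no-copy reorientation
    dg = h5py.h5d.open(fid, "/Grid%08i/%s" % (1, "Density"))
    dg.read(h5py.h5s.ALL, h5py.h5s.ALL, data)       # fills `data` in place
    fid.close()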


https://bitbucket.org/yt_analysis/yt-3.0/commits/91d207c96176/
Changeset:   91d207c96176
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-09 13:47:53
Summary:     Enable default_fluid_type guessing for fields.
Affected #:  1 file

diff -r fa2e4c694b533ede2a1dc65fcc616f118a93f04e -r 91d207c961766ed4d6d58247cafff4a4c1bd3b1f yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -319,10 +319,12 @@
             return self._last_finfo
         # We also should check "all" for particles, which can show up if you're
         # mixing deposition/gas fields with particle fields.
-        if guessing_type and ("all", fname) in self.field_info:
-            self._last_freq = ("all", fname)
-            self._last_finfo = self.field_info["all", fname]
-            return self._last_finfo
+        if guessing_type:
+            for ftype in ("all", self.default_fluid_type):
+                if (ftype, fname) in self.field_info:
+                    self._last_freq = (ftype, fname)
+                    self._last_finfo = self.field_info[(ftype, fname)]
+                    return self._last_finfo
         raise YTFieldNotFound((ftype, fname), self)
 
     def add_particle_union(self, union):
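
The guessing logic generalizes the old special case: given a bare field
name, try ("all", fname) and then (default_fluid_type, fname) before
raising. Distilled, with stand-in values:

    field_info = {("gas", "Density"): "<definition>"}
    default_fluid_type = "gas"

    def guess_field_type(fname):
        for ftype in ("all", default_fluid_type):
            if (ftype, fname) in field_info:
                return (ftype, fname)
        raise KeyError(fname)

    print guess_field_type("Density")  # ('gas', 'Density')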


https://bitbucket.org/yt_analysis/yt-3.0/commits/29e5074753e9/
Changeset:   29e5074753e9
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-09 13:55:25
Summary:     Removing more of hdf5_light_reader
Affected #:  2 files

diff -r 91d207c961766ed4d6d58247cafff4a4c1bd3b1f -r 29e5074753e9f059f96b57209d832910d2b80a1f yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -50,14 +50,6 @@
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_blocking_call
 
-def get_field_names_helper(filename, id, results):
-    try:
-        names = hdf5_light_reader.ReadListOfDatasets(
-                    filename, "/Grid%08i" % id)
-        results.put((names, "Grid %s has: %s" % (id, names)))
-    except (exceptions.KeyError, hdf5_light_reader.ReadingError):
-        results.put((None, "Grid %s is a bit funky?" % id))
-
 class EnzoGrid(AMRGridPatch):
     """
     Class representing a single Enzo Grid instance.
@@ -449,40 +441,18 @@
         self.field_list = []
         # Do this only on the root processor to save disk work.
         if self.comm.rank in (0, None):
-            field_list = self.get_data("/", "DataFields")
-            if field_list is None:
-                mylog.info("Gathering a field list (this may take a moment.)")
-                field_list = set()
-                random_sample = self._generate_random_grids()
-                tothread = ytcfg.getboolean("yt","thread_field_detection")
-                if tothread:
-                    jobs = []
-                    result_queue = Queue.Queue()
-                    # Start threads
-                    for grid in random_sample:
-                        if not hasattr(grid, 'filename'): continue
-                        helper = Thread(target = get_field_names_helper, 
-                            args = (grid.filename, grid.id, result_queue))
-                        jobs.append(helper)
-                        helper.start()
-                    # Here we make sure they're finished.
-                    for helper in jobs:
-                        helper.join()
-                    for grid in random_sample:
-                        res = result_queue.get()
-                        mylog.debug(res[1])
-                        if res[0] is not None:
-                            field_list = field_list.union(res[0])
-                else:
-                    for grid in random_sample:
-                        if not hasattr(grid, 'filename'): continue
-                        try:
-                            gf = self.io._read_field_names(grid)
-                        except self.io._read_exception:
-                            mylog.debug("Grid %s is a bit funky?", grid.id)
-                            continue
-                        mylog.debug("Grid %s has: %s", grid.id, gf)
-                        field_list = field_list.union(gf)
+            mylog.info("Gathering a field list (this may take a moment.)")
+            field_list = set()
+            random_sample = self._generate_random_grids()
+            for grid in random_sample:
+                if not hasattr(grid, 'filename'): continue
+                try:
+                    gf = self.io._read_field_names(grid)
+                except self.io._read_exception:
+                    mylog.debug("Grid %s is a bit funky?", grid.id)
+                    continue
+                mylog.debug("Grid %s has: %s", grid.id, gf)
+                field_list = field_list.union(gf)
             if "AppendActiveParticleType" in self.parameter_file.parameters:
                 ap_fields = self._detect_active_particle_fields()
                 field_list = list(set(field_list).union(ap_fields))

diff -r 91d207c961766ed4d6d58247cafff4a4c1bd3b1f -r 29e5074753e9f059f96b57209d832910d2b80a1f yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -105,34 +105,6 @@
                         yield (ptype, field), data
             f.close()
 
-    def _read_grid_chunk(self, chunks, fields):
-        sets = [fname for ftype, fname in fields]
-        g = chunks[0].objs[0]
-        if g.filename is None:
-            return {}
-        rv = hdf5_light_reader.ReadMultipleGrids(
-            g.filename, [g.id], sets, "")[g.id]
-        for ftype, fname in fields:
-            rv[(ftype, fname)] = rv.pop(fname).swapaxes(0,2)
-        return rv
-
-    def _read_chunk_data(self, chunk, fields, suffix = ""):
-        data = {}
-        grids_by_file = defaultdict(list)
-        for g in chunk.objs:
-            if g.filename is None:
-                continue
-            grids_by_file[g.filename].append(g.id)
-        #if len(chunk.objs) == 1 and len(grids_by_file) > 0:
-        #    raise RuntimeError
-        sets = [fname for ftype, fname in fields]
-        for filename in grids_by_file:
-            nodes = grids_by_file[filename]
-            nodes.sort()
-            data.update(hdf5_light_reader.ReadMultipleGrids(
-                filename, nodes, sets, suffix))
-        return data
-
     def _read_fluid_selection(self, chunks, selector, fields, size):
         rv = {}
         # Now we have to do something unpleasant
@@ -185,9 +157,10 @@
                       slice(NGZ, -NGZ))
 
     def _read_raw_data_set(self, grid, field):
-        raise NotImplementedError
-        return hdf5_light_reader.ReadData(grid.filename,
-                "/Grid%08i/%s" % (grid.id, field))
+        f = h5py.File(grid.filename, "r")
+        ds = f["/Grid%08i/%s" % (grid.id, field)][:].swapaxes(0,2)
+        f.close()
+        return ds
 
 class IOHandlerInMemory(BaseIOHandler):
 
@@ -246,19 +219,14 @@
     _particle_reader = False
 
     def _read_data_set(self, grid, field):
-        raise NotImplementedError
-        return hdf5_light_reader.ReadData(grid.filename,
-            "/Grid%08i/%s" % (grid.id, field)).transpose()[:,:,None]
+        f = h5py.File(grid.filename, "r")
+        ds = f["/Grid%08i/%s" % (grid.id, field)][:]
+        f.close()
+        return ds.transpose()[:,:,None]
 
     def modify(self, field):
         pass
 
-    def _read_data_slice(self, grid, field, axis, coord):
-        raise NotImplementedError
-        t = hdf5_light_reader.ReadData(grid.filename, "/Grid%08i/%s" %
-                        (grid.id, field)).transpose()
-        return t
-
     def _read_fluid_selection(self, chunks, selector, fields, size):
         rv = {}
         # Now we have to do something unpleasant
@@ -307,16 +275,10 @@
     _particle_reader = False
 
     def _read_data_set(self, grid, field):
-        raise NotImplementedError
-        return hdf5_light_reader.ReadData(grid.filename,
-            "/Grid%08i/%s" % (grid.id, field)).transpose()[:,None,None]
+        f = h5py.File(grid.filename, "r")
+        ds = f["/Grid%08i/%s" % (grid.id, field)][:]
+        f.close()
+        return ds.transpose()[:,None,None]
 
     def modify(self, field):
         pass
-
-    def _read_data_slice(self, grid, field, axis, coord):
-        raise NotImplementedError
-        t = hdf5_light_reader.ReadData(grid.filename, "/Grid%08i/%s" %
-                        (grid.id, field))
-        return t
-
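
All of these hdf5_light_reader replacements reduce to a plain h5py read plus
the axis shuffling that restores a 3D, C-ordered layout for 3D, 2D, and 1D
packed grids. A combined sketch with an illustrative path:

    import h5py

    def read_grid_field(filename, grid_id, field, rank):
        f = h5py.File(filename, "r")
        ds = f["/Grid%08i/%s" % (grid_id, field)][:]
        f.close()
        if rank == 3:
            return ds.swapaxes(0, 2)           # Fortran -> C ordering
        elif rank == 2:
            return ds.transpose()[:, :, None]  # pad 2D data out to 3D
        return ds.transpose()[:, None, None]   # likewise for 1D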


https://bitbucket.org/yt_analysis/yt-3.0/commits/6a382ffc7de2/
Changeset:   6a382ffc7de2
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-09 14:13:22
Summary:     Removing largely unused code from Enzo.
Affected #:  1 file

diff -r 29e5074753e9f059f96b57209d832910d2b80a1f -r 6a382ffc7de242162e259a32317fb9a7ef6d9817 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -244,35 +244,21 @@
             mylog.debug("Your data uses the annoying hardcoded path.")
             self._strip_path = True
         if self.data_style is not None: return
-        try:
-            a = SD.SD(test_grid)
-            self.data_style = 'enzo_hdf4'
-            mylog.debug("Detected HDF4")
-        except:
-            try:
-                # This will always fail
-                list_of_sets = hdf5_light_reader.ReadListOfDatasets(test_grid, "/")
-            except:
-                print "Could not find dataset.  Defaulting to packed HDF5"
-                list_of_sets = []
-            if len(list_of_sets) == 0 and rank == 3:
-                mylog.debug("Detected packed HDF5")
-                if self.parameters.get("WriteGhostZones", 0) == 1:
-                    self.data_style= "enzo_packed_3d_gz"
-                    self.grid = EnzoGridGZ
-                else:
-                    self.data_style = 'enzo_packed_3d'
-            elif len(list_of_sets) > 0 and rank == 3:
-                mylog.debug("Detected unpacked HDF5")
-                self.data_style = 'enzo_hdf5'
-            elif len(list_of_sets) == 0 and rank == 2:
-                mylog.debug("Detect packed 2D")
-                self.data_style = 'enzo_packed_2d'
-            elif len(list_of_sets) == 0 and rank == 1:
-                mylog.debug("Detect packed 1D")
-                self.data_style = 'enzo_packed_1d'
+        if rank == 3:
+            mylog.debug("Detected packed HDF5")
+            if self.parameters.get("WriteGhostZones", 0) == 1:
+                self.data_style= "enzo_packed_3d_gz"
+                self.grid = EnzoGridGZ
             else:
-                raise TypeError
+                self.data_style = 'enzo_packed_3d'
+        elif rank == 2:
+            mylog.debug("Detect packed 2D")
+            self.data_style = 'enzo_packed_2d'
+        elif rank == 1:
+            mylog.debug("Detect packed 1D")
+            self.data_style = 'enzo_packed_1d'
+        else:
+            raise NotImplementedError
 
     # Sets are sorted, so that won't work!
     def _parse_hierarchy(self):
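
With the HDF4 and unpacked-HDF5 probes gone, detection becomes a pure
function of the domain rank plus the WriteGhostZones parameter:

    def detect_data_style(rank, write_ghost_zones=False):
        if rank == 3:
            return "enzo_packed_3d_gz" if write_ghost_zones \
                else "enzo_packed_3d"
        elif rank == 2:
            return "enzo_packed_2d"
        elif rank == 1:
            return "enzo_packed_1d"
        raise NotImplementedError

    print detect_data_style(3, write_ghost_zones=True)  # enzo_packed_3d_gz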


https://bitbucket.org/yt_analysis/yt-3.0/commits/f484cd000006/
Changeset:   f484cd000006
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-09 14:13:35
Summary:     Fixing test failures for particle geometry.
Affected #:  2 files

diff -r 6a382ffc7de242162e259a32317fb9a7ef6d9817 -r f484cd000006f97e01480857fde648fb31455329 yt/data_objects/particle_fields.py
--- a/yt/data_objects/particle_fields.py
+++ b/yt/data_objects/particle_fields.py
@@ -193,5 +193,4 @@
     registry.add_field((ptype, "Velocities"),
                        function=_get_vec_func(ptype, vel_names),
                        particle_type=True)
-
     return list(set(registry.keys()).difference(orig))

diff -r 6a382ffc7de242162e259a32317fb9a7ef6d9817 -r f484cd000006f97e01480857fde648fb31455329 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -37,6 +37,9 @@
     OctreeGeometryHandler
 from yt.geometry.particle_geometry_handler import \
     ParticleGeometryHandler
+from yt.data_objects.particle_fields import \
+    particle_vector_functions, \
+    particle_deposition_functions
 from yt.geometry.oct_container import \
     OctreeContainer
 from yt.geometry.unstructured_mesh_handler import \
@@ -747,6 +750,15 @@
     n_ref = 64
     over_refine_factor = 1
 
+    def _setup_particle_type(self, ptype):
+        df = particle_vector_functions(ptype,
+            ["particle_position_%s" % ax for ax in 'xyz'],
+            ["particle_velocity_%s" % ax for ax in 'xyz'],
+            self.field_info)
+        df += particle_deposition_functions(ptype,
+            "Coordinates", "particle_mass", self.field_info)
+        return df
+
 def load_particles(data, sim_unit_to_cm, bbox=None,
                       sim_time=0.0, periodicity=(True, True, True),
                       n_ref = 64, over_refine_factor = 1):


https://bitbucket.org/yt_analysis/yt-3.0/commits/64769bc42a9f/
Changeset:   64769bc42a9f
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-09 15:52:03
Summary:     Adding mode 'r' to h5py.File
Affected #:  1 file

diff -r f484cd000006f97e01480857fde648fb31455329 -r 64769bc42a9f26eee8adef00fc7656df1e8e2fad yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -113,7 +113,7 @@
             if not (len(chunks) == len(chunks[0].objs) == 1):
                 raise RuntimeError
             g = chunks[0].objs[0]
-            f = h5py.File(g.filename)
+            f = h5py.File(g.filename, 'r')
             gds = f.get("/Grid%08i" % g.id)
             for ftype, fname in fields:
                 rv[(ftype, fname)] = gds.get(fname).value.swapaxes(0,2)
@@ -235,7 +235,7 @@
             if not (len(chunks) == len(chunks[0].objs) == 1):
                 raise RuntimeError
             g = chunks[0].objs[0]
-            f = h5py.File(g.filename)
+            f = h5py.File(g.filename, 'r')
             gds = f.get("/Grid%08i" % g.id)
             for ftype, fname in fields:
                 rv[(ftype, fname)] = np.atleast_3d(gds.get(fname).value)
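
The explicit mode matters because h5py of this vintage defaults to mode 'a'
(read/write, creating the file if missing), which wants write access and can
fail outright on read-only file systems; 'r' is safe for shared data:

    import h5py

    f = h5py.File("DD0000.cpu0000", "r")  # read-only, never touches the file
    # ... read datasets ...
    f.close()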


https://bitbucket.org/yt_analysis/yt-3.0/commits/4a9a2e0b38ad/
Changeset:   4a9a2e0b38ad
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-09 16:50:46
Summary:     Fixing _setup_particle_type for SPH data.
Affected #:  1 file

diff -r 64769bc42a9f26eee8adef00fc7656df1e8e2fad -r 4a9a2e0b38ad7cf1f325b6a084ef235b219d7ce9 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -124,6 +124,7 @@
         self.field_info.add_field((ptype, "particle_index"),
             function = TranslationFunc((ptype, "ParticleIDs")),
             particle_type = True)
+        return [ (ptype, "particle_index") ]
 
 class GadgetStaticOutput(ParticleStaticOutput):
     _hierarchy_class = ParticleGeometryHandler


https://bitbucket.org/yt_analysis/yt-3.0/commits/718d82f91202/
Changeset:   718d82f91202
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-09 17:02:21
Summary:     Fixing some particle field stuff for ARTIO.
Affected #:  3 files

diff -r 4a9a2e0b38ad7cf1f325b6a084ef235b219d7ce9 -r 718d82f91202ea681525a38488d2dcbf0eb49b90 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -25,7 +25,12 @@
 import _artio_caller
 from yt.utilities.definitions import \
     mpc_conversion, sec_conversion
-from .fields import ARTIOFieldInfo, KnownARTIOFields, b2t
+from .fields import \
+    ARTIOFieldInfo, \
+    KnownARTIOFields, \
+    b2t, \
+    _setup_particle_fields
+
 
 from yt.funcs import *
 from yt.geometry.geometry_handler import \
@@ -503,6 +508,11 @@
         # hard coded assumption of 3D periodicity (add to parameter file)
         self.periodicity = (True, True, True)
 
+    def _setup_particle_type(self, ptype):
+        orig = set(self.field_info.keys())
+        _setup_particle_fields(self.field_info, ptype)
+        return list(set(self.field_info.keys()).difference(orig))
+
     @classmethod
     def _is_valid(self, *args, **kwargs):
         # a valid artio header file starts with a prefix and ends with .art

diff -r 4a9a2e0b38ad7cf1f325b6a084ef235b219d7ce9 -r 718d82f91202ea681525a38488d2dcbf0eb49b90 yt/frontends/artio/fields.py
--- a/yt/frontends/artio/fields.py
+++ b/yt/frontends/artio/fields.py
@@ -239,23 +239,31 @@
 ##################################################
 #Particle fields
 
-for ptype in ("nbody", "stars"):
+def _setup_particle_fields(registry, ptype):
     for ax in 'xyz':
-        add_artio_field((ptype, "particle_velocity_%s" % ax),
+        registry.add_field(
+                        (ptype, "particle_velocity_%s" % ax),
                         function=NullFunc,
                         particle_type=True)
-        add_artio_field((ptype, "particle_position_%s" % ax),
+        registry.add_field(
+                        (ptype, "particle_position_%s" % ax),
                         function=NullFunc,
                         particle_type=True)
 
     def _convertParticleMass(data):
         return np.float64(data.convert('particle_mass'))
-    add_artio_field((ptype, "particle_mass"),
+    registry.add_field((ptype, "particle_mass"),
               function=NullFunc,
               convert_function=_convertParticleMass,
               units=r"\rm{g}",
               particle_type=True)
-    add_artio_field((ptype, "particle_index"), function=NullFunc, particle_type=True)
+
+    particle_vector_functions(ptype,
+        ["particle_position_%s" % ax for ax in 'xyz'],
+        ["particle_velocity_%s" % ax for ax in 'xyz'],
+        registry)
+    particle_deposition_functions(ptype, "Coordinates", "particle_mass",
+        registry)
 
 #add_artio_field("creation_time", function=NullFunc, particle_type=True)
 def _particle_age(field, data):
@@ -267,24 +275,6 @@
 add_field(("stars","particle_age"), function=_particle_age, units=r"\rm{s}",
           particle_type=True)
 
-# We can now set up particle vector and particle deposition fields.
-
-for fname in ["particle_position_%s" % ax for ax in 'xyz'] + \
-             ["particle_velocity_%s" % ax for ax in 'xyz'] + \
-             ["particle_index", "particle_species",
-              "particle_mass"]:
-    func = _field_concat(fname)
-    ARTIOFieldInfo.add_field(("all", fname), function=func,
-            particle_type = True)
-
-for ptype in ("nbody", "stars", "all"):
-    particle_vector_functions(ptype,
-        ["particle_position_%s" % ax for ax in 'xyz'],
-        ["particle_velocity_%s" % ax for ax in 'xyz'],
-        ARTIOFieldInfo)
-    particle_deposition_functions(ptype, "Coordinates", "particle_mass",
-        ARTIOFieldInfo)
-
 def mass_dm(field, data):
     tr = np.ones(data.ActiveDimensions, dtype='float32')
     idx = data["particle_type"] < 5

diff -r 4a9a2e0b38ad7cf1f325b6a084ef235b219d7ce9 -r 718d82f91202ea681525a38488d2dcbf0eb49b90 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -948,8 +948,9 @@
         return os.path.exists("%s.hierarchy" % args[0])
 
     def _setup_particle_type(self, ptype):
-        df = _setup_particle_fields(self.field_info, ptype)
-        return df
+        orig = set(self.field_info.keys())
+        _setup_particle_fields(self.field_info, ptype)
+        return list(set(self.field_info.keys()).difference(orig))
 
 class EnzoStaticOutputInMemory(EnzoStaticOutput):
     _hierarchy_class = EnzoHierarchyInMemory


https://bitbucket.org/yt_analysis/yt-3.0/commits/4c3ea311e92b/
Changeset:   4c3ea311e92b
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-09 18:51:47
Summary:     Fixing fid.close and unused numpy import.
Affected #:  2 files

diff -r 718d82f91202ea681525a38488d2dcbf0eb49b90 -r 4c3ea311e92be86be2ed36239c6d1f6ae40d6d6a yt/data_objects/particle_unions.py
--- a/yt/data_objects/particle_unions.py
+++ b/yt/data_objects/particle_unions.py
@@ -15,7 +15,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import numpy as np
 from yt.funcs import ensure_list
 
 class ParticleUnion(object):

diff -r 718d82f91202ea681525a38488d2dcbf0eb49b90 -r 4c3ea311e92be86be2ed36239c6d1f6ae40d6d6a yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -143,6 +143,7 @@
                     dg.read(h5py.h5s.ALL, h5py.h5s.ALL, data)
                     nd = g.select(selector, data_view, rv[field], ind) # caches
                 ind += nd
+            fid.close()
         return rv
 
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/9d3d8e1ecda6/
Changeset:   9d3d8e1ecda6
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-09 18:56:04
Summary:     Well, this seems like a freebie.
Affected #:  1 file

diff -r 4c3ea311e92be86be2ed36239c6d1f6ae40d6d6a -r 9d3d8e1ecda676896aad8d86bdd1b740d4f58770 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -139,7 +139,7 @@
                 data_view = data.swapaxes(0,2)
                 for field in fields:
                     ftype, fname = field
-                    dg = h5py.h5d.open(fid, "/Grid%08i/Density" % g.id)
+                    dg = h5py.h5d.open(fid, "/Grid%08i/%s" % (g.id, fname))
                     dg.read(h5py.h5s.ALL, h5py.h5s.ALL, data)
                     nd = g.select(selector, data_view, rv[field], ind) # caches
                 ind += nd


https://bitbucket.org/yt_analysis/yt-3.0/commits/f21bb7349dfe/
Changeset:   f21bb7349dfe
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-09 19:35:02
Summary:     Rewrite the ARTIO particle reading and fix mass bug.
Affected #:  4 files

diff -r 9d3d8e1ecda676896aad8d86bdd1b740d4f58770 -r f21bb7349dfe102c0d5a18b0e5c1e6674a0f2989 yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -916,7 +916,7 @@
             "species_%02u_secondary_variable_labels" % (species,), [])
         tp = total_particles[species]
         vp = &vpoints[species]
-        if field == "MASS":
+        if field == "MASS" and params["particle_species_mass"][species] != 0.0:
             vp.n_mass = 1
             npf64arr = data[(species, field)] = np.zeros(tp, dtype="float64")
             # We fill this *now*

diff -r 9d3d8e1ecda676896aad8d86bdd1b740d4f58770 -r f21bb7349dfe102c0d5a18b0e5c1e6674a0f2989 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -474,6 +474,7 @@
             self.num_species = 0
             self.particle_variables = []
             self.particle_types = ()
+        self.particle_types_raw = self.particle_types
 
         self.current_time = b2t(self.artio_parameters["tl"][0])
 

diff -r 9d3d8e1ecda676896aad8d86bdd1b740d4f58770 -r f21bb7349dfe102c0d5a18b0e5c1e6674a0f2989 yt/frontends/artio/fields.py
--- a/yt/frontends/artio/fields.py
+++ b/yt/frontends/artio/fields.py
@@ -28,8 +28,7 @@
 from yt.data_objects.particle_fields import \
     particle_deposition_functions, \
     particle_vector_functions, \
-    particle_scalar_functions, \
-    _field_concat, _field_concat_slice
+    particle_scalar_functions
 import numpy as np
 
 KnownARTIOFields = FieldInfoContainer()

diff -r 9d3d8e1ecda676896aad8d86bdd1b740d4f58770 -r f21bb7349dfe102c0d5a18b0e5c1e6674a0f2989 yt/frontends/artio/io.py
--- a/yt/frontends/artio/io.py
+++ b/yt/frontends/artio/io.py
@@ -34,33 +34,39 @@
                 cp += artchunk.data_size
         return tr
 
-    def _read_particle_selection(self, chunks, selector, fields):
-        fd = dict((ftuple, []) for ftuple in fields)
-        ftypes = set(ftype for (ftype, fname) in fields)
-        for ftype in ftypes:
+    def _read_particle_coords(self, chunks, ptf):
+        pn = "particle_position_%s"
+        chunks = list(chunks)
+        fields = [(ptype, "particle_position_%s" % ax)
+                  for ptype, field_list in ptf.items()
+                  for ax in 'xyz']
+        for chunk in chunks: # These should be organized by grid filename
+            for subset in chunk.objs:
+                rv = dict(**subset.fill_particles(fields))
+                for ptype, field_list in sorted(ptf.items()):
+                    x, y, z = (np.asarray(rv[ptype][pn % ax], dtype="=f8")
+                               for ax in 'xyz')
+                    yield ptype, (x, y, z)
+                    rv.pop(ptype)
+
+    def _read_particle_fields(self, chunks, ptf, selector):
+        pn = "particle_position_%s"
+        chunks = list(chunks)
+        fields = [(ptype, fname) for ptype, field_list in ptf.items()
+                                 for fname in field_list]
+        for ptype, field_list in sorted(ptf.items()):
             for ax in 'xyz':
-                ftuple = (ftype, "particle_position_%s" % ax)
-                if ftuple not in fd:
-                    fd[ftuple] = []
-        for onechunk in chunks:
-            # We're going to read here
-            for artchunk in onechunk.objs:
-                rv = artchunk.fill_particles(fd.keys())
-                # Now we count and also cut
-                for ftype in rv:
-                    mask = selector.select_points(
-                        rv[ftype]["particle_position_x"],
-                        rv[ftype]["particle_position_y"],
-                        rv[ftype]["particle_position_z"])
-                    if mask is None: continue
-                    for fname in rv[ftype]:
-                        if (ftype, fname) not in fields: continue
-                        fd[ftype, fname].append(rv[ftype][fname][mask])
-        # This needs to be pre-initialized in case we are later concatenated.
-        tr = dict((ftuple, np.empty(0, dtype='float64')) for ftuple in fields)
-        for f in fd.keys():
-            v = fd.pop(f)
-            if f not in fields: continue
-            if len(v) == 0: continue
-            tr[f] = np.concatenate(v)
-        return tr
+                pp = "particle_position_%s" % ax
+                if pp not in field_list:
+                    fields.append((ptype, pp))
+        for chunk in chunks: # These should be organized by grid filename
+            for subset in chunk.objs:
+                rv = dict(**subset.fill_particles(fields))
+                for ptype, field_list in sorted(ptf.items()):
+                    x, y, z = (np.asarray(rv[ptype][pn % ax], dtype="=f8")
+                               for ax in 'xyz')
+                    mask = selector.select_points(x, y, z)
+                    for field in field_list:
+                        data = np.asarray(rv[ptype][field], "=f8")
+                        yield (ptype, field), data
+                    rv.pop(ptype)
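
The rewritten reader follows yt 3.0's generator protocol for particle IO:
_read_particle_coords yields (ptype, (x, y, z)) per chunk object, and
_read_particle_fields yields ((ptype, field), values) once the selector has
been applied to the coordinates. A toy, self-contained version of the latter
(not the ARTIO reader itself; the sketch masks the yielded values with the
selector result):

    import numpy as np

    def read_particle_fields(chunk_data, ptf, select_points):
        for ptype, field_list in sorted(ptf.items()):
            x, y, z = (chunk_data[ptype]["particle_position_%s" % ax]
                       for ax in "xyz")
            mask = select_points(x, y, z)
            for field in field_list:
                yield (ptype, field), chunk_data[ptype][field][mask]

    chunk = {"io": {"particle_position_x": np.array([0.1, 0.9]),
                    "particle_position_y": np.array([0.2, 0.8]),
                    "particle_position_z": np.array([0.3, 0.7]),
                    "particle_mass": np.array([1.0, 2.0])}}
    inside = lambda x, y, z: x < 0.5  # stand-in selector
    for key, vals in read_particle_fields(chunk, {"io": ["particle_mass"]},
                                          inside):
        print key, vals  # ('io', 'particle_mass') [ 1.]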


https://bitbucket.org/yt_analysis/yt-3.0/commits/8293ecbdad84/
Changeset:   8293ecbdad84
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-09 19:35:47
Summary:     Merging from mainline yt development
Affected #:  37 files

diff -r f21bb7349dfe102c0d5a18b0e5c1e6674a0f2989 -r 8293ecbdad842c0516ab332aee45fdfc0d24186e yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -103,5 +103,8 @@
     TwoPointFunctions, \
     FcnSet
 
+from .sunyaev_zeldovich.api import SZProjection
+
 from .radmc3d_export.api import \
     RadMC3DWriter
+

diff -r f21bb7349dfe102c0d5a18b0e5c1e6674a0f2989 -r 8293ecbdad842c0516ab332aee45fdfc0d24186e yt/analysis_modules/setup.py
--- a/yt/analysis_modules/setup.py
+++ b/yt/analysis_modules/setup.py
@@ -21,4 +21,5 @@
     config.add_subpackage("star_analysis")
     config.add_subpackage("two_point_functions")
     config.add_subpackage("radmc3d_export")
+    config.add_subpackage("sunyaev_zeldovich")    
     return config

diff -r f21bb7349dfe102c0d5a18b0e5c1e6674a0f2989 -r 8293ecbdad842c0516ab332aee45fdfc0d24186e yt/analysis_modules/sunyaev_zeldovich/api.py
--- /dev/null
+++ b/yt/analysis_modules/sunyaev_zeldovich/api.py
@@ -0,0 +1,12 @@
+"""
+API for sunyaev_zeldovich
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from projection import SZProjection

diff -r f21bb7349dfe102c0d5a18b0e5c1e6674a0f2989 -r 8293ecbdad842c0516ab332aee45fdfc0d24186e yt/analysis_modules/sunyaev_zeldovich/projection.py
--- /dev/null
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -0,0 +1,349 @@
+"""
+Projection class for the Sunyaev-Zeldovich effect. Requires SZpack (at least
+version 1.1.1) to be downloaded and installed:
+
+http://www.chluba.de/SZpack/
+
+For details on the computations involved please refer to the following references:
+
+Chluba, Nagai, Sazonov, Nelson, MNRAS, 2012, arXiv:1205.5778
+Chluba, Switzer, Nagai, Nelson, MNRAS, 2012, arXiv:1211.3206 
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.utilities.physical_constants import sigma_thompson, clight, hcgs, kboltz, mh, Tcmb
+from yt.data_objects.image_array import ImageArray
+from yt.data_objects.field_info_container import add_field
+from yt.funcs import fix_axis, mylog, iterable, get_pbar
+from yt.utilities.definitions import inv_axis_names
+from yt.visualization.image_writer import write_fits, write_projection
+from yt.visualization.volume_rendering.camera import off_axis_projection
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+     communication_system, parallel_root_only
+import numpy as np
+
+I0 = 2*(kboltz*Tcmb)**3/((hcgs*clight)**2)*1.0e17
+        
+try:
+    import SZpack
+except:
+    raise ImportError("SZpack not installed. It can be obtained from http://www.chluba.de/SZpack/.")
+
+vlist = "xyz"
+
+def _t_squared(field, data):
+    return data["Density"]*data["TempkeV"]*data["TempkeV"]
+add_field("TSquared", function=_t_squared)
+
+def _beta_perp_squared(field, data):
+    return data["Density"]*data["VelocityMagnitude"]**2/clight/clight - data["BetaParSquared"]
+add_field("BetaPerpSquared", function=_beta_perp_squared)
+
+def _beta_par_squared(field, data):
+    return data["BetaPar"]**2/data["Density"]
+add_field("BetaParSquared", function=_beta_par_squared)
+
+def _t_beta_par(field, data):
+    return data["TempkeV"]*data["BetaPar"]
+add_field("TBetaPar", function=_t_beta_par)
+
+def _t_sz(field, data):
+    return data["Density"]*data["TempkeV"]
+add_field("TeSZ", function=_t_sz)
+
+class SZProjection(object):
+    r""" Initialize a SZProjection object.
+
+    Parameters
+    ----------
+    pf : parameter_file
+        The parameter file.
+    freqs : array_like
+        The frequencies (in GHz) at which to compute the SZ spectral distortion.
+    mue : float, optional
+        Mean molecular weight for determining the electron number density.
+    high_order : boolean, optional
+        Should we calculate high-order moments of velocity and temperature?
+
+    Examples
+    --------
+    >>> freqs = [90., 180., 240.]
+    >>> szprj = SZProjection(pf, freqs, high_order=True)
+    """
+    def __init__(self, pf, freqs, mue=1.143, high_order=False):
+            
+        self.pf = pf
+        self.num_freqs = len(freqs)
+        self.high_order = high_order
+        self.freqs = np.array(freqs)
+        self.mueinv = 1./mue
+        self.xinit = hcgs*self.freqs*1.0e9/(kboltz*Tcmb)
+        self.freq_fields = ["%d_GHz" % (int(freq)) for freq in freqs]
+        self.data = {}
+
+        self.units = {}
+        self.units["TeSZ"] = r"$\mathrm{keV}$"
+        self.units["Tau"] = None
+
+        self.display_names = {}
+        self.display_names["TeSZ"] = r"$\mathrm{T_e}$"
+        self.display_names["Tau"] = r"$\mathrm{\tau}$"
+
+        for f, field in zip(self.freqs, self.freq_fields):
+            self.units[field] = r"$\mathrm{MJy\ sr^{-1}}$"
+            self.display_names[field] = r"$\mathrm{\Delta{I}_{%d\ GHz}}$" % (int(f))
+            
+    def on_axis(self, axis, center="c", width=(1, "unitary"), nx=800, source=None):
+        r""" Make an on-axis projection of the SZ signal.
+
+        Parameters
+        ----------
+        axis : integer or string
+            The axis of the simulation domain along which to make the SZprojection.
+        center : array_like or string, optional
+            The center of the projection.
+        width : float or tuple
+            The width of the projection.
+        nx : integer, optional
+            The dimensions on a side of the projection image.
+        source : yt.data_objects.api.AMRData, optional
+            If specified, this will be the data source used for selecting regions to project.
+
+        Examples
+        --------
+        >>> szprj.on_axis("y", center="max", width=(1.0, "mpc"), source=my_sphere)
+        """
+        axis = fix_axis(axis)
+
+        def _beta_par(field, data):
+            axis = data.get_field_parameter("axis")
+            vpar = data["Density"]*data["%s-velocity" % (vlist[axis])]
+            return vpar/clight
+        add_field("BetaPar", function=_beta_par)    
+
+        proj = self.pf.h.proj(axis, "Density", data_source=source)
+        proj.set_field_parameter("axis", axis)
+        frb = proj.to_frb(width, nx)
+        dens = frb["Density"]
+        Te = frb["TeSZ"]/dens
+        bpar = frb["BetaPar"]/dens
+        omega1 = frb["TSquared"]/dens/(Te*Te) - 1.
+        bperp2 = np.zeros((nx,nx))
+        sigma1 = np.zeros((nx,nx))
+        kappa1 = np.zeros((nx,nx))                                    
+        if self.high_order:
+            bperp2 = frb["BetaPerpSquared"]/dens
+            sigma1 = frb["TBetaPar"]/dens/Te - bpar
+            kappa1 = frb["BetaParSquared"]/dens - bpar*bpar
+        tau = sigma_thompson*dens*self.mueinv/mh
+
+        nx,ny = frb.buff_size
+        self.bounds = frb.bounds
+        self.dx = (frb.bounds[1]-frb.bounds[0])/nx
+        self.dy = (frb.bounds[3]-frb.bounds[2])/ny
+        self.nx = nx
+        
+        self._compute_intensity(tau, Te, bpar, omega1, sigma1, kappa1, bperp2)
+                                                                                                                
+    def off_axis(self, L, center="c", width=(1, "unitary"), nx=800, source=None):
+        r""" Make an off-axis projection of the SZ signal.
+        
+        Parameters
+        ----------
+        L : array_like
+            The normal vector of the projection. 
+        center : array_like or string, optional
+            The center of the projection.
+        width : float or tuple
+            The width of the projection.
+        nx : integer, optional
+            The dimensions on a side of the projection image.
+        source : yt.data_objects.api.AMRData, optional
+            If specified, this will be the data source used for selecting regions to project.
+            Currently unsupported in yt 2.x.
+                    
+        Examples
+        --------
+        >>> L = np.array([0.5, 1.0, 0.75])
+        >>> szprj.off_axis(L, center="c", width=(2.0, "mpc"))
+        """
+        if iterable(width):
+            w = width[0]/self.pf.units[width[1]]
+        else:
+            w = width
+        if center == "c":
+            ctr = self.pf.domain_center
+        elif center == "max":
+            ctr = self.pf.h.find_max("Density")
+        else:
+            ctr = center
+
+        if source is not None:
+            mylog.error("Source argument is not currently supported for off-axis S-Z projections.")
+            raise NotImplementedError
+                
+        def _beta_par(field, data):
+            vpar = data["Density"]*(data["x-velocity"]*L[0]+
+                                    data["y-velocity"]*L[1]+
+                                    data["z-velocity"]*L[2])
+            return vpar/clight
+        add_field("BetaPar", function=_beta_par)
+
+        dens    = off_axis_projection(self.pf, ctr, L, w, nx, "Density")
+        Te      = off_axis_projection(self.pf, ctr, L, w, nx, "TeSZ")/dens
+        bpar    = off_axis_projection(self.pf, ctr, L, w, nx, "BetaPar")/dens
+        omega1  = off_axis_projection(self.pf, ctr, L, w, nx, "TSquared")/dens
+        omega1  = omega1/(Te*Te) - 1.
+        if self.high_order:
+            bperp2  = off_axis_projection(self.pf, ctr, L, w, nx, "BetaPerpSquared")/dens
+            sigma1  = off_axis_projection(self.pf, ctr, L, w, nx, "TBetaPar")/dens
+            sigma1  = sigma1/Te - bpar
+            kappa1  = off_axis_projection(self.pf, ctr, L, w, nx, "BetaParSquared")/dens
+            kappa1 -= bpar
+        else:
+            bperp2 = np.zeros((nx,nx))
+            sigma1 = np.zeros((nx,nx))
+            kappa1 = np.zeros((nx,nx))
+        tau = sigma_thompson*dens*self.mueinv/mh
+
+        self.bounds = np.array([-0.5*w, 0.5*w, -0.5*w, 0.5*w])
+        self.dx = w/nx
+        self.dy = w/nx
+        self.nx = nx
+
+        self._compute_intensity(tau, Te, bpar, omega1, sigma1, kappa1, bperp2)
+
+    def _compute_intensity(self, tau, Te, bpar, omega1, sigma1, kappa1, bperp2):
+
+        # Bad hack, but we get NaNs if we don't do something like this
+        small_beta = np.abs(bpar) < 1.0e-20
+        bpar[small_beta] = 1.0e-20
+                                                                   
+        comm = communication_system.communicators[-1]
+
+        nx, ny = self.nx,self.nx
+        signal = np.zeros((self.num_freqs,nx,ny))
+        xo = np.zeros((self.num_freqs))
+        
+        k = 0
+
+        start_i = comm.rank*nx/comm.size
+        end_i = (comm.rank+1)*nx/comm.size
+
+        pbar = get_pbar("Computing SZ signal.", nx*nx)
+
+        for i in xrange(start_i, end_i):
+            for j in xrange(ny):
+                xo[:] = self.xinit[:]
+                SZpack.compute_combo_means(xo, tau[i,j], Te[i,j],
+                                           bpar[i,j], omega1[i,j],
+                                           sigma1[i,j], kappa1[i,j], bperp2[i,j])
+                signal[:,i,j] = xo[:]
+                pbar.update(k)
+                k += 1
+
+        signal = comm.mpi_allreduce(signal)
+        
+        pbar.finish()
+
+        for i, field in enumerate(self.freq_fields):
+            self.data[field] = ImageArray(I0*self.xinit[i]**3*signal[i,:,:])
+        self.data["Tau"] = ImageArray(tau)
+        self.data["TeSZ"] = ImageArray(Te)
+
+    @parallel_root_only
+    def write_fits(self, filename_prefix, clobber=True):
+        r""" Export images to a FITS file. Writes the SZ distortion in all
+        specified frequencies as well as the mass-weighted temperature and the
+        optical depth. Distance units are in kpc.  
+        
+        Parameters
+        ----------
+        filename_prefix : string
+            The prefix of the FITS filename.
+        clobber : boolean, optional
+            If True, overwrite an existing file of the same name.
+                    
+        Examples
+        --------
+        >>> szprj.write_fits("SZbullet", clobber=False)
+        """
+        coords = {}
+        coords["dx"] = self.dx*self.pf.units["kpc"]
+        coords["dy"] = self.dy*self.pf.units["kpc"]
+        coords["xctr"] = 0.0
+        coords["yctr"] = 0.0
+        coords["units"] = "kpc"
+        other_keys = {"Time" : self.pf.current_time}
+        write_fits(self.data, filename_prefix, clobber=clobber, coords=coords,
+                   other_keys=other_keys)
+
+    @parallel_root_only
+    def write_png(self, filename_prefix):
+        r""" Export images to PNG files. Writes the SZ distortion in all
+        specified frequencies as well as the mass-weighted temperature and the
+        optical depth. Distance units are in kpc. 
+        
+        Parameters
+        ----------
+        filename_prefix : string
+            The prefix of the image filenames.
+                
+        Examples
+        --------
+        >>> szprj.write_png("SZsloshing")
+        """     
+        extent = tuple([bound*self.pf.units["kpc"] for bound in self.bounds])
+        for field, image in self.items():
+            filename = filename_prefix+"_"+field+".png"
+            label = self.display_names[field]
+            if self.units[field] is not None:
+                label += " ("+self.units[field]+")"
+            write_projection(image, filename, colorbar_label=label, take_log=False,
+                             extent=extent, xlabel=r"$\mathrm{x\ (kpc)}$",
+                             ylabel=r"$\mathrm{y\ (kpc)}$")
+
+    @parallel_root_only
+    def write_hdf5(self, filename):
+        r"""Export the set of S-Z fields to a set of HDF5 datasets.
+        
+        Parameters
+        ----------
+        filename : string
+            This file will be opened in "write" mode.
+        
+        Examples
+        --------
+        >>> szprj.write_hdf5("SZsloshing.h5")                        
+        """
+        import h5py
+        f = h5py.File(filename, "w")
+        for field, data in self.items():
+            f.create_dataset(field, data=data)
+        f.close()
+   
+    def keys(self):
+        return self.data.keys()
+
+    def items(self):
+        return self.data.items()
+
+    def values(self):
+        return self.data.values()
+    
+    def has_key(self, key):
+        return key in self.data
+
+    def __getitem__(self, key):
+        return self.data[key]
+
+    @property
+    def shape(self):
+        return (self.nx,self.nx)

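Taken together, SZProjection behaves like a read-only dictionary keyed by
frequency (plus "Tau" and "TeSZ"), with the exporters layered on top. A
minimal end-to-end sketch, in which the dataset path and frequency list are
assumed example values, not part of this changeset:

    from yt.mods import load
    from yt.analysis_modules.sunyaev_zeldovich.projection import SZProjection

    pf = load("sloshing_low_res_hdf5_plt_cnt_0300")  # assumed dataset
    szprj = SZProjection(pf, [90., 180., 240.])      # frequencies in GHz
    szprj.on_axis(2, nx=512)                         # project along the z axis
    print szprj.keys()   # frequency fields such as "90_GHz", plus "Tau", "TeSZ"
    szprj.write_fits("SZsloshing")                   # or write_png / write_hdf5
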
diff -r f21bb7349dfe102c0d5a18b0e5c1e6674a0f2989 -r 8293ecbdad842c0516ab332aee45fdfc0d24186e yt/analysis_modules/sunyaev_zeldovich/setup.py
--- /dev/null
+++ b/yt/analysis_modules/sunyaev_zeldovich/setup.py
@@ -0,0 +1,14 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('sunyaev_zeldovich', parent_package, top_path)
+    config.add_subpackage("tests")
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config

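For this configuration() to take effect, the new subpackage also has to be
registered one level up. A sketch of the corresponding hook in the parent
package's setup.py (the surrounding file is assumed; only the add_subpackage
call is specific to this changeset):

    # yt/analysis_modules/setup.py (sketch, same numpy.distutils layout)
    def configuration(parent_package='', top_path=None):
        from numpy.distutils.misc_util import Configuration
        config = Configuration('analysis_modules', parent_package, top_path)
        config.add_subpackage("sunyaev_zeldovich")  # registers the new module
        return config
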
diff -r f21bb7349dfe102c0d5a18b0e5c1e6674a0f2989 -r 8293ecbdad842c0516ab332aee45fdfc0d24186e yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
--- /dev/null
+++ b/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
@@ -0,0 +1,139 @@
+"""
+Unit test the sunyaev_zeldovich analysis module.
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.frontends.stream.api import load_uniform_grid
+from yt.funcs import get_pbar, mylog
+from yt.utilities.physical_constants import cm_per_kpc, K_per_keV, \
+     mh, cm_per_km, kboltz, Tcmb, hcgs, clight, sigma_thompson
+from yt.testing import *
+from yt.utilities.answer_testing.framework import requires_pf, \
+     GenericArrayTest, data_dir_load, GenericImageTest
+try:
+    from yt.analysis_modules.sunyaev_zeldovich.projection import SZProjection, I0
+except ImportError:
+    pass
+import numpy as np
+try:
+    import SZpack
+except ImportError:
+    pass
+
+mue = 1./0.88
+freqs = np.array([30., 90., 240.])
+
+def setup():
+    """Test specific setup."""
+    from yt.config import ytcfg
+    ytcfg["yt", "__withintesting"] = "True"
+
+def full_szpack3d(pf, xo):
+    data = pf.h.grids[0]
+    dz = pf.h.get_smallest_dx()*pf.units["cm"]
+    nx,ny,nz = data["Density"].shape
+    dn = np.zeros((nx,ny,nz))
+    Dtau = sigma_thompson*data["Density"]/(mh*mue)*dz
+    Te = data["Temperature"]/K_per_keV
+    betac = data["z-velocity"]/clight
+    pbar = get_pbar("Computing 3-D cell-by-cell S-Z signal for comparison.", nx)
+    for i in xrange(nx):
+        pbar.update(i)
+        for j in xrange(ny):
+            for k in xrange(nz):
+                dn[i,j,k] = SZpack.compute_3d(xo, Dtau[i,j,k],
+                                              Te[i,j,k], betac[i,j,k],
+                                              1.0, 0.0, 0.0, 1.0e-5)
+    pbar.finish()
+    return I0*xo**3*np.sum(dn, axis=2)
+
+def setup_cluster():
+
+    R = 1000.
+    r_c = 100.
+    rho_c = 1.673e-26
+    beta = 1.
+    T0 = 4.
+    nx,ny,nz = 16,16,16
+    c = 0.17
+    a_c = 30.
+    a = 200.
+    v0 = 300.*cm_per_km
+    ddims = (nx,ny,nz)
+
+    x, y, z = np.mgrid[-R:R:nx*1j,
+                       -R:R:ny*1j,
+                       -R:R:nz*1j]
+
+    r = np.sqrt(x**2+y**2+z**2)
+
+    dens = np.zeros(ddims)
+    dens = rho_c*(1.+(r/r_c)**2)**(-1.5*beta)
+    temp = T0*K_per_keV/(1.+r/a)*(c+r/a_c)/(1.+r/a_c)
+    velz = v0*temp/(T0*K_per_keV)
+
+    data = {}
+    data["Density"] = dens
+    data["Temperature"] = temp
+    data["x-velocity"] = np.zeros(ddims)
+    data["y-velocity"] = np.zeros(ddims)
+    data["z-velocity"] = velz
+
+    bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]])
+
+    L = 2*R*cm_per_kpc
+    dl = L/nz
+
+    pf = load_uniform_grid(data, ddims, L, bbox=bbox)
+
+    return pf
+
+@requires_module("SZpack")
+def test_projection():
+    pf = setup_cluster()
+    nx,ny,nz = pf.domain_dimensions
+    xinit = 1.0e9*hcgs*freqs/(kboltz*Tcmb)
+    szprj = SZProjection(pf, freqs, mue=mue, high_order=True)
+    szprj.on_axis(2, nx=nx)
+    deltaI = np.zeros((3,nx,ny))
+    for i in xrange(3):
+        deltaI[i,:,:] = full_szpack3d(pf, xinit[i])
+        yield assert_almost_equal, deltaI[i,:,:], szprj["%d_GHz" % int(freqs[i])], 6
+
+M7 = "DD0010/moving7_0010"
+@requires_module("SZpack")
+@requires_pf(M7)
+def test_M7_onaxis():
+    pf = data_dir_load(M7)
+    szprj = SZProjection(pf, freqs)
+    szprj.on_axis(2, nx=100)
+    def onaxis_array_func():
+        return szprj.data
+    def onaxis_image_func(filename_prefix):
+        szprj.write_png(filename_prefix)
+    for test in [GenericArrayTest(pf, onaxis_array_func),
+                 GenericImageTest(pf, onaxis_image_func, 3)]:
+        test_M7_onaxis.__name__ = test.description
+        yield test
+
+@requires_module("SZpack")
+@requires_pf(M7)
+def test_M7_offaxis():
+    pf = data_dir_load(M7)
+    szprj = SZProjection(pf, freqs)
+    szprj.off_axis(np.array([0.1,-0.2,0.4]), nx=100)
+    def offaxis_array_func():
+        return szprj.data
+    def offaxis_image_func(filename_prefix):
+        szprj.write_png(filename_prefix)
+    for test in [GenericArrayTest(pf, offaxis_array_func),
+                 GenericImageTest(pf, offaxis_image_func, 3)]:
+        test_M7_offaxis.__name__ = test.description
+        yield test

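The xinit line in test_projection converts each observing frequency to the
dimensionless CMB frequency x = h*nu/(k_B*T_CMB), the 1.0e9 factor turning
GHz into Hz. A quick numerical check with approximate cgs constants:

    hcgs, kboltz, Tcmb = 6.626e-27, 1.381e-16, 2.725  # erg s, erg/K, K (approx.)
    for nu_ghz in (30., 90., 240.):
        x = 1.0e9 * hcgs * nu_ghz / (kboltz * Tcmb)
        print "%5.0f GHz -> x = %.2f" % (nu_ghz, x)   # 0.53, 1.58, 4.23
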
diff -r f21bb7349dfe102c0d5a18b0e5c1e6674a0f2989 -r 8293ecbdad842c0516ab332aee45fdfc0d24186e yt/analysis_modules/two_point_functions/two_point_functions.py
--- a/yt/analysis_modules/two_point_functions/two_point_functions.py
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py
@@ -498,7 +498,7 @@
             points[:, 2] = points[:, 2] / self.period[2]
             fKD.qv_many = points.T
             fKD.nn_tags = np.asfortranarray(np.empty((1, points.shape[0]), dtype='int64'))
-            find_many_nn_nearest_neighbors()
+            fKD.find_many_nn_nearest_neighbors()
             # The -1 is for fortran counting.
             n = fKD.nn_tags[0,:] - 1
         return n

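The one-line fix above matters because the bare name was never imported into
this module's namespace: only the fKD module object carries the Fortran
kD-tree bindings, so the unqualified call raised NameError the first time the
branch ran. The same failure mode in miniature, with numpy standing in for
the compiled fKD module:

    import numpy as np  # stand-in for the compiled fKD module

    def lookup_fixed(pts):
        # qualified through the module object, as in the fix above
        return np.asfortranarray(pts)

    # the pre-fix form, asfortranarray(pts) with no module qualifier, raises
    # NameError because the name was never imported into this namespace
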
diff -r f21bb7349dfe102c0d5a18b0e5c1e6674a0f2989 -r 8293ecbdad842c0516ab332aee45fdfc0d24186e yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -52,7 +52,7 @@
     notebook_password = '',
     answer_testing_tolerance = '3',
     answer_testing_bitwise = 'False',
-    gold_standard_filename = 'gold310',
+    gold_standard_filename = 'gold311',
     local_standard_filename = 'local001',
     sketchfab_api_key = 'None',
     thread_field_detection = 'False'

diff -r f21bb7349dfe102c0d5a18b0e5c1e6674a0f2989 -r 8293ecbdad842c0516ab332aee45fdfc0d24186e yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -113,7 +113,7 @@
     _domain_ind = None
 
     def select_blocks(self, selector):
-        mask = self.oct_handler.mask(selector)
+        mask = self.oct_handler.mask(selector, domain_id = self.domain_id)
         mask = self._reshape_vals(mask)
         slicer = OctreeSubsetBlockSlice(self)
         for i, sl in slicer:
@@ -271,12 +271,14 @@
 
     @property
     def LeftEdge(self):
-        LE = self._fcoords[0,0,0,self.ind,:] - self._fwidth[0,0,0,self.ind,:]*0.5
+        LE = (self._fcoords[0,0,0,self.ind,:]
+            - self._fwidth[0,0,0,self.ind,:])*0.5
         return LE
 
     @property
     def RightEdge(self):
-        RE = self._fcoords[1,1,1,self.ind,:] + self._fwidth[1,1,1,self.ind,:]*0.5
+        RE = (self._fcoords[-1,-1,-1,self.ind,:]
+            + self._fwidth[-1,-1,-1,self.ind,:])*0.5
         return RE
 
     @property

diff -r f21bb7349dfe102c0d5a18b0e5c1e6674a0f2989 -r 8293ecbdad842c0516ab332aee45fdfc0d24186e yt/data_objects/tests/test_fields.py
--- a/yt/data_objects/tests/test_fields.py
+++ b/yt/data_objects/tests/test_fields.py
@@ -93,6 +93,9 @@
         if fname.startswith("CuttingPlane"): continue
         if fname.startswith("particle"): continue
         if fname.startswith("CIC"): continue
+        if fname.startswith("BetaPar"): continue
+        if fname.startswith("TBetaPar"): continue
+        if fname.startswith("BetaPerp"): continue
         if fname.startswith("WeakLensingConvergence"): continue
         if fname.startswith("DensityPerturbation"): continue
         if fname.startswith("Matter_Density"): continue

diff -r f21bb7349dfe102c0d5a18b0e5c1e6674a0f2989 -r 8293ecbdad842c0516ab332aee45fdfc0d24186e yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -3,7 +3,8 @@
 cimport numpy as np
 import sys 
 
-from yt.geometry.selection_routines cimport SelectorObject, AlwaysSelector
+from yt.geometry.selection_routines cimport \
+    SelectorObject, AlwaysSelector, OctreeSubsetSelector
 from yt.utilities.lib.fp_utils cimport imax
 from yt.geometry.oct_container cimport \
     SparseOctreeContainer
@@ -122,7 +123,9 @@
     void artio_sfc_coords( artio_fileset_handle *handle, int64_t index, int coords[3] ) nogil
 
 cdef void check_artio_status(int status, char *fname="[unknown]"):
-    if status!=ARTIO_SUCCESS :
+    if status != ARTIO_SUCCESS:
+        import traceback
+        traceback.print_stack()
         callername = sys._getframe().f_code.co_name
         nline = sys._getframe().f_lineno
         raise RuntimeError('failure with status', status, 'in function',fname,'from caller', callername, nline)
@@ -487,8 +490,7 @@
 
         return (fcoords, ires, data)
 
-    def root_sfc_ranges_all(self) :
-        cdef int max_range_size = 1024
+    def root_sfc_ranges_all(self, int max_range_size = 1024) :
         cdef int64_t sfc_start, sfc_end
         cdef artio_selection *selection
 
@@ -502,8 +504,8 @@
         artio_selection_destroy(selection)
         return sfc_ranges
 
-    def root_sfc_ranges(self, SelectorObject selector) :
-        cdef int max_range_size = 1024
+    def root_sfc_ranges(self, SelectorObject selector,
+                        int max_range_size = 1024):
         cdef int coords[3]
         cdef int64_t sfc_start, sfc_end
         cdef np.float64_t left[3]
@@ -561,6 +563,10 @@
     cdef np.float64_t dds[3]
     cdef np.int64_t dims[3]
     cdef public np.int64_t total_octs
+    cdef np.int64_t *doct_count
+    cdef np.int64_t **pcount
+    cdef float **root_mesh_data
+    cdef np.int64_t nvars[2]
 
     def __init__(self, domain_dimensions, # cells
                  domain_left_edge,
@@ -568,6 +574,7 @@
                  artio_fileset artio_handle,
                  sfc_start, sfc_end):
         cdef int i
+        cdef np.int64_t sfc
         self.sfc_start = sfc_start
         self.sfc_end = sfc_end
         self.artio_handle = artio_handle
@@ -575,25 +582,54 @@
         self.octree_handler = None
         self.handle = artio_handle.handle
         self.oct_count = None
+        self.root_mesh_data = NULL
+        self.pcount = <np.int64_t **> malloc(sizeof(np.int64_t*)
+            * artio_handle.num_species)
+        self.nvars[0] = artio_handle.num_species
+        self.nvars[1] = artio_handle.num_grid_variables
+        for i in range(artio_handle.num_species):
+            self.pcount[i] = <np.int64_t*> malloc(sizeof(np.int64_t)
+                * (self.sfc_end - self.sfc_start + 1))
+            for sfc in range(self.sfc_end - self.sfc_start + 1):
+                self.pcount[i][sfc] = 0
         for i in range(3):
             self.dims[i] = domain_dimensions[i]
             self.DLE[i] = domain_left_edge[i]
             self.DRE[i] = domain_right_edge[i]
             self.dds[i] = (self.DRE[i] - self.DLE[i])/self.dims[i]
 
+    def __dealloc__(self):
+        cdef int i
+        for i in range(self.nvars[0]):
+            free(self.pcount[i])
+        for i in range(self.nvars[1]):
+            free(self.root_mesh_data[i])
+        free(self.pcount)
+        free(self.root_mesh_data)
+
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
     def construct_mesh(self):
-        cdef int status, level
-        cdef np.int64_t sfc, oc
+        cdef int status, level, ngv
+        cdef np.int64_t sfc, oc, i
         cdef double dpos[3]
         cdef int num_oct_levels
         cdef int max_level = self.artio_handle.max_level
         cdef int *num_octs_per_level = <int *>malloc(
             (max_level + 1)*sizeof(int))
+        cdef int num_species = self.artio_handle.num_species
+        cdef int *num_particles_per_species =  <int *>malloc(
+            sizeof(int)*num_species) 
         cdef ARTIOOctreeContainer octree
+        ngv = self.nvars[1]
+        cdef float *grid_variables = <float *>malloc(
+            ngv * sizeof(float))
         self.octree_handler = octree = ARTIOOctreeContainer(self)
+        self.root_mesh_data = <float **>malloc(sizeof(float *) * ngv)
+        for i in range(ngv):
+            self.root_mesh_data[i] = <float *>malloc(sizeof(float) * \
+                (self.sfc_end - self.sfc_start + 1))
         # We want to pre-allocate an array of root pointers.  In the future,
         # this will be pre-determined by the ARTIO library.  However, because
         # realloc plays havoc with our tree searching, we can't utilize an
@@ -606,7 +642,11 @@
         check_artio_status(status) 
         for sfc in range(self.sfc_start, self.sfc_end + 1):
             status = artio_grid_read_root_cell_begin( self.handle,
-                sfc, dpos, NULL, &num_oct_levels, num_octs_per_level)
+                sfc, dpos, grid_variables, &num_oct_levels,
+                num_octs_per_level)
+            for i in range(ngv):
+                self.root_mesh_data[i][sfc - self.sfc_start] = \
+                    grid_variables[i]
             check_artio_status(status)
             if num_oct_levels > 0:
                 oc = 0
@@ -618,13 +658,39 @@
                     num_octs_per_level, sfc)
             status = artio_grid_read_root_cell_end( self.handle )
             check_artio_status(status)
+        status = artio_grid_clear_sfc_cache( self.handle)
+        check_artio_status(status)
+        # Now particles
+        status = artio_particle_cache_sfc_range(self.handle, self.sfc_start,
+                                            self.sfc_end)
+        check_artio_status(status) 
+        for sfc in range(self.sfc_start, self.sfc_end + 1):
+            # Now particles
+            status = artio_particle_read_root_cell_begin( self.handle,
+                    sfc, num_particles_per_species )
+            check_artio_status(status)
+
+            for i in range(num_species):
+                self.pcount[i][sfc - self.sfc_start] = \
+                    num_particles_per_species[i]
+
+            status = artio_particle_read_root_cell_end( self.handle)
+            check_artio_status(status)
+
+        status = artio_particle_clear_sfc_cache( self.handle)
+        check_artio_status(status)
+
+        free(grid_variables)
         free(num_octs_per_level)
+        free(num_particles_per_species)
+        self.oct_count = oct_count
+        self.doct_count = <np.int64_t *> oct_count.data
         self.root_mesh_handler = ARTIORootMeshContainer(self)
-        self.oct_count = oct_count
 
     def free_mesh(self):
         self.octree_handler = None
         self.root_mesh_handler = None
+        self.doct_count = NULL
         self.oct_count = None
 
 def get_coords(artio_fileset handle, np.int64_t s):
@@ -661,9 +727,11 @@
     # this, we will avoid creating it as long as possible.
 
     cdef public artio_fileset artio_handle
+    cdef ARTIOSFCRangeHandler range_handler
     cdef np.int64_t level_indices[32]
 
     def __init__(self, ARTIOSFCRangeHandler range_handler):
+        self.range_handler = range_handler
         self.artio_handle = range_handler.artio_handle
         # Note the final argument is partial_coverage, which indicates whether
         # or not an Oct can be partially refined.
@@ -781,6 +849,12 @@
         #   * Enable preloading during mesh initialization
         #   * Calculate domain indices on the fly rather than with a
         #     double-loop to calculate domain_counts
+        # The cons should be in order
+        cdef np.int64_t sfc_start, sfc_end
+        sfc_start = self.domains[0].con_id
+        sfc_end = self.domains[self.num_domains - 1].con_id
+        status = artio_grid_cache_sfc_range(handle, sfc_start, sfc_end )
+        check_artio_status(status) 
         cdef np.int64_t offset = 0 
         for si in range(self.num_domains):
             sfc = self.domains[si].con_id
@@ -814,6 +888,8 @@
                     dest[i + offset] = source[oct_ind, cell_inds[i + offset]]
             # Now, we offset by the actual number filled here.
             offset += domain_counts[si]
+        status = artio_grid_clear_sfc_cache(handle)
+        check_artio_status(status)
         free(field_ind)
         free(field_vals)
         free(grid_variables)
@@ -825,12 +901,18 @@
         sfc_start = self.domains[0].con_id
         sfc_end = self.domains[self.num_domains - 1].con_id
         rv = read_sfc_particles(self.artio_handle, sfc_start, sfc_end,
-                                0, fields)
+                                0, fields, self.range_handler.doct_count,
+                                self.range_handler.pcount)
         return rv
 
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
 cdef read_sfc_particles(artio_fileset artio_handle,
                         np.int64_t sfc_start, np.int64_t sfc_end,
-                        int read_unrefined, fields):
+                        int read_unrefined, fields,
+                        np.int64_t *doct_count,
+                        np.int64_t **pcount):
     cdef int status, ispec, subspecies
     cdef np.int64_t sfc, particle, pid, ind, vind
     cdef int num_species = artio_handle.num_species
@@ -876,32 +958,16 @@
         vpoints[ispec].n_p = 0
         vpoints[ispec].n_s = 0
 
-    status = artio_particle_cache_sfc_range( handle,
-            sfc_start, sfc_end ) 
-    check_artio_status(status)
-
-    # We cache so we can figure out if the cell is refined or not.
-    status = artio_grid_cache_sfc_range(handle, sfc_start, sfc_end)
-    check_artio_status(status) 
-
     # Pass through once.  We want every single particle.
+    tp = 0
+    cdef np.int64_t c
     for sfc in range(sfc_start, sfc_end + 1):
-        status = artio_grid_read_root_cell_begin( handle,
-            sfc, dpos, NULL, &num_oct_levels, num_octs_per_level)
-        check_artio_status(status)
-        status = artio_grid_read_root_cell_end(handle)
-        check_artio_status(status)
-        if read_unrefined == 1 and num_oct_levels > 0: continue
-        if read_unrefined == 0 and num_oct_levels == 0: continue
-        status = artio_particle_read_root_cell_begin( handle, sfc,
-                num_particles_per_species )
-        check_artio_status(status)
+        c = doct_count[sfc - sfc_start]
+        if read_unrefined == 1 and c > 0: continue
+        if read_unrefined == 0 and c == 0: continue
 
         for ispec in range(num_species):
-            total_particles[ispec] += num_particles_per_species[ispec]
-
-        status = artio_particle_read_root_cell_end( handle )
-        check_artio_status(status)
+            total_particles[ispec] += pcount[ispec][sfc - sfc_start]
 
     # Now we allocate our final fields, which will be filled
     #for ispec in range(num_species):
@@ -918,39 +984,50 @@
         vp = &vpoints[species]
         if field == "MASS" and params["particle_species_mass"][species] != 0.0:
             vp.n_mass = 1
-            npf64arr = data[(species, field)] = np.zeros(tp, dtype="float64")
+            data[(species, field)] = np.zeros(tp, dtype="float64")
+            npf64arr = data[(species, field)]
             # We fill this *now*
             npf64arr += params["particle_species_mass"][species]
             vp.mass = <np.float64_t*> npf64arr.data
         elif field == "PID":
             vp.n_pid = 1
-            npi64arr = data[(species, field)] = np.zeros(tp, dtype="int64")
+            data[(species, field)] = np.zeros(tp, dtype="int64")
+            npi64arr = data[(species, field)]
             vp.pid = <np.int64_t*> npi64arr.data
         elif field == "SPECIES":
             vp.n_species = 1
-            npi8arr = data[(species, field)] = np.zeros(tp, dtype="int8")
+            data[(species, field)] = np.zeros(tp, dtype="int8")
+            npi8arr = data[(species, field)]
             # We fill this *now*
             npi8arr += species
             vp.species = <np.int8_t*> npi8arr.data
         elif npri_vars[species] > 0 and field in pri_vars :
-            npf64arr = data[(species, field)] = np.zeros(tp, dtype="float64")
+            data[(species, field)] = np.zeros(tp, dtype="float64")
+            npf64arr = data[(species, field)]
             vp.p_ind[vp.n_p] = pri_vars.index(field)
             vp.pvars[vp.n_p] = <np.float64_t *> npf64arr.data
             vp.n_p += 1
         elif nsec_vars[species] > 0 and field in sec_vars :
-            npf64arr = data[(species, field)] = np.zeros(tp, dtype="float64")
+            data[(species, field)] = np.zeros(tp, dtype="float64")
+            npf64arr = data[(species, field)]
             vp.s_ind[vp.n_s] = sec_vars.index(field)
             vp.svars[vp.n_s] = <np.float64_t *> npf64arr.data
             vp.n_s += 1
 
+    status = artio_particle_cache_sfc_range( handle,
+            sfc_start, sfc_end ) 
+    check_artio_status(status)
+
     for sfc in range(sfc_start, sfc_end + 1):
-        status = artio_grid_read_root_cell_begin( handle,
-            sfc, dpos, NULL, &num_oct_levels, num_octs_per_level)
+        c = doct_count[sfc - sfc_start]
-        check_artio_status(status)
-        status = artio_grid_read_root_cell_end(handle)
-        check_artio_status(status)
-        if read_unrefined == 1 and num_oct_levels > 0: continue
-        if read_unrefined == 0 and num_oct_levels == 0: continue
+        if read_unrefined == 1 and c > 0: continue
+        if read_unrefined == 0 and c == 0: continue
+        c = 0
+        for ispec in range(num_species) : 
+            if accessed_species[ispec] == 0: continue
+            c += pcount[ispec][sfc - sfc_start]
+        if c == 0: continue
         status = artio_particle_read_root_cell_begin( handle, sfc,
                 num_particles_per_species )
         check_artio_status(status)
@@ -986,11 +1063,8 @@
         status = artio_particle_read_root_cell_end( handle )
         check_artio_status(status)
 
-    #status = artio_particle_clear_sfc_cache(handle)
-    #check_artio_status(status)
-
-    #status = artio_grid_clear_sfc_cache(handle)
-    #check_artio_status(status)
+    status = artio_particle_clear_sfc_cache(handle)
+    check_artio_status(status)
 
     free(num_octs_per_level)
     free(num_particles_per_species)
@@ -1011,11 +1085,15 @@
     cdef np.uint64_t sfc_start
     cdef np.uint64_t sfc_end
     cdef public object _last_mask
-    cdef public object _last_selector_id
+    cdef public np.int64_t _last_selector_id
+    cdef np.int64_t _last_mask_sum
     cdef ARTIOSFCRangeHandler range_handler
+    cdef np.uint8_t *sfc_mask
+    cdef np.int64_t nsfc
 
     def __init__(self, ARTIOSFCRangeHandler range_handler):
         cdef int i
+        cdef np.int64_t sfci
         for i in range(3):
             self.DLE[i] = range_handler.DLE[i]
             self.DRE[i] = range_handler.DRE[i]
@@ -1023,10 +1101,27 @@
             self.dds[i] = range_handler.dds[i]
         self.handle = range_handler.handle
         self.artio_handle = range_handler.artio_handle
-        self._last_mask = self._last_selector_id = None
+        self._last_mask = None
+        self._last_selector_id = -1
         self.sfc_start = range_handler.sfc_start
         self.sfc_end = range_handler.sfc_end
         self.range_handler = range_handler
+        # We assume that the number of octs has been created and filled
+        # already.  We no longer care about ANY of the SFCs that have octs
+        # inside them -- this goes for every operation that this object
+        # performs.
+        self.sfc_mask = <np.uint8_t *>malloc(sizeof(np.uint8_t) *
+          self.sfc_end - self.sfc_start + 1)
+        self.nsfc = 0
+        for sfci in range(self.sfc_end - self.sfc_start + 1):
+            if self.range_handler.oct_count[sfci] > 0:
+                self.sfc_mask[sfci] = 0
+            else:
+                self.sfc_mask[sfci] = 1
+                self.nsfc += 1
+
+    def __dealloc__(self):
+        free(self.sfc_mask)
 
     @cython.cdivision(True)
     cdef np.int64_t pos_to_sfc(self, np.float64_t pos[3]) nogil:
@@ -1054,19 +1149,24 @@
         cdef int i
         return self.mask(selector).sum()
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
     def icoords(self, SelectorObject selector, np.int64_t num_cells = -1,
                 int domain_id = -1):
         # Note that num_octs does not have to equal sfc_end - sfc_start + 1.
-        cdef np.int64_t sfc
+        cdef np.int64_t sfc, sfci = -1
         cdef int acoords[3], i
         cdef np.ndarray[np.uint8_t, ndim=1, cast=True] mask
         mask = self.mask(selector)
-        num_cells = mask.sum()
+        num_cells = self._last_mask_sum
         cdef np.ndarray[np.int64_t, ndim=2] coords
         coords = np.empty((num_cells, 3), dtype="int64")
         cdef int filled = 0
         for sfc in range(self.sfc_start, self.sfc_end + 1):
-            if mask[sfc - self.sfc_start] == 0: continue
+            if self.sfc_mask[sfc - self.sfc_start] == 0: continue
+            sfci += 1
+            if mask[sfci] == 0: continue
             # Note that we do *no* checks on refinement here.  In fact, this
             # entire setup should not need to touch the disk except if the
             # artio sfc calculators need to.
@@ -1076,20 +1176,25 @@
             filled += 1
         return coords
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
     def fcoords(self, SelectorObject selector, np.int64_t num_cells = -1,
                 int domain_id = -1):
         # Note that num_cells does not have to equal sfc_end - sfc_start + 1.
-        cdef np.int64_t sfc
+        cdef np.int64_t sfc, sfci = -1
         cdef np.float64_t pos[3]
         cdef int acoords[3], i
         cdef np.ndarray[np.uint8_t, ndim=1, cast=True] mask
         mask = self.mask(selector)
-        num_cells = mask.sum()
+        num_cells = self._last_mask_sum
         cdef np.ndarray[np.float64_t, ndim=2] coords
         coords = np.empty((num_cells, 3), dtype="float64")
         cdef int filled = 0
         for sfc in range(self.sfc_start, self.sfc_end + 1):
-            if mask[sfc - self.sfc_start] == 0: continue
+            if self.sfc_mask[sfc - self.sfc_start] == 0: continue
+            sfci += 1
+            if mask[sfci] == 0: continue
             # Note that we do *no* checks on refinement here.  In fact, this
             # entire setup should not need to touch the disk except if the
             # artio sfc calculators need to.
@@ -1099,23 +1204,29 @@
             filled += 1
         return coords
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
     def fwidth(self, SelectorObject selector, np.int64_t num_cells = -1,
                 int domain_id = -1):
         cdef int i
         cdef np.ndarray[np.uint8_t, ndim=1, cast=True] mask
         mask = self.mask(selector)
-        num_cells = mask.sum()
+        num_cells = self._last_mask_sum
         cdef np.ndarray[np.float64_t, ndim=2] width
         width = np.zeros((num_cells, 3), dtype="float64")
         for i in range(3):
             width[:,i] = self.dds[i]
         return width
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
     def ires(self, SelectorObject selector, np.int64_t num_cells = -1,
                 int domain_id = -1):
         cdef np.ndarray[np.uint8_t, ndim=1, cast=True] mask
         mask = self.mask(selector)
-        num_cells = mask.sum()
+        num_cells = self._last_mask_sum
         cdef np.ndarray[np.int64_t, ndim=1] res
         res = np.zeros(num_cells, dtype="int64")
         return res
@@ -1132,7 +1243,7 @@
         # other.  Note that we *do* apply the selector here.
         cdef np.int64_t num_cells = -1
         cdef np.int64_t ind
-        cdef np.int64_t sfc
+        cdef np.int64_t sfc, sfci = -1
         cdef np.float64_t pos[3]
         cdef np.float64_t dpos[3]
         cdef int dim, status, filled = 0
@@ -1149,7 +1260,7 @@
             # Note that RAMSES can have partial refinement inside an Oct.  This
             # means we actually do want the number of Octs, not the number of
             # cells.
-            num_cells = mask.sum()
+            num_cells = self._last_mask_sum
             if dims > 1:
                 dest = np.zeros((num_cells, dims), dtype=source.dtype,
                     order='C')
@@ -1158,7 +1269,9 @@
         ddata = (<char*>dest.data) + offset*ss*dims
         ind = 0
         for sfc in range(self.sfc_start, self.sfc_end + 1):
-            if mask[sfc - self.sfc_start] == 0: continue
+            if self.sfc_mask[sfc - self.sfc_start] == 0: continue
+            sfci += 1
+            if mask[sfci] == 0: continue
             memcpy(ddata, sdata + ind, dims * ss)
             ddata += dims * ss
             filled += 1
@@ -1170,56 +1283,55 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def mask(self, SelectorObject selector, np.int64_t num_cells = -1):
+    def mask(self, SelectorObject selector, np.int64_t num_cells = -1,
+             int domain_id = -1): 
+        # We take a domain_id here to avoid subclassing
         cdef int i
         cdef np.float64_t pos[3]
-        cdef np.int64_t sfc
-        cdef np.ndarray[np.int64_t, ndim=1] oct_count
+        cdef np.int64_t sfc, sfci = -1
         if self._last_selector_id == hash(selector):
             return self._last_mask
-        if num_cells == -1:
-            # We need to count, but this process will only occur one time,
-            # since num_cells will later be cached.
-            num_cells = self.sfc_end - self.sfc_start + 1
-        mask = np.zeros((num_cells), dtype="uint8")
-        oct_count = self.range_handler.oct_count
+        cdef np.ndarray[np.uint8_t, ndim=1] mask
+        mask = np.zeros((self.nsfc), dtype="uint8")
+        self._last_mask_sum = 0
         for sfc in range(self.sfc_start, self.sfc_end + 1):
-            if oct_count[sfc - self.sfc_start] > 0: continue
+            if self.sfc_mask[sfc - self.sfc_start] == 0: continue
+            sfci += 1
             self.sfc_to_pos(sfc, pos)
             if selector.select_cell(pos, self.dds) == 0: continue
-            mask[sfc - self.sfc_start] = 1
+            mask[sfci] = 1
+            self._last_mask_sum += 1
         self._last_mask = mask.astype("bool")
         self._last_selector_id = hash(selector)
         return self._last_mask
 
+
     def fill_sfc_particles(self, fields):
         rv = read_sfc_particles(self.artio_handle,
                                 self.sfc_start, self.sfc_end,
-                                1, fields)
+                                1, fields, self.range_handler.doct_count,
+                                self.range_handler.pcount)
         return rv
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
     def fill_sfc(self, SelectorObject selector, field_indices):
         cdef np.ndarray[np.float64_t, ndim=1] dest
         cdef int n, status, i, di, num_oct_levels, nf, ngv, max_level
-        cdef np.int64_t sfc, num_cells
+        cdef np.int64_t sfc, num_cells, sfci = -1
         cdef np.float64_t val
-        cdef artio_fileset_handle *handle = self.artio_handle.handle
         cdef double dpos[3]
         # We duplicate some of the grid_variables stuff here so that we can
         # potentially release the GIL
         nf = len(field_indices)
         ngv = self.artio_handle.num_grid_variables
-        max_level = self.artio_handle.max_level
         cdef np.ndarray[np.uint8_t, ndim=1, cast=True] mask
         mask = self.mask(selector, -1)
-        num_cells = mask.sum()
+        num_cells = self._last_mask_sum
         tr = []
         for i in range(nf):
             tr.append(np.zeros(num_cells, dtype="float64"))
-        cdef int *num_octs_per_level = <int *>malloc(
-            (max_level + 1)*sizeof(int))
-        cdef float *grid_variables = <float *>malloc(
-            ngv * sizeof(float))
         cdef int* field_ind = <int*> malloc(
             nf * sizeof(int))
         cdef np.float64_t **field_vals = <np.float64_t**> malloc(
@@ -1232,29 +1344,23 @@
         # First we need to walk the mesh in the file.  Then we fill in the dest
         # location based on the file index.
         cdef int filled = 0
-        status = artio_grid_cache_sfc_range(handle,
-            self.sfc_start, self.sfc_end )
-        check_artio_status(status) 
+        cdef float **mesh_data = self.range_handler.root_mesh_data
         for sfc in range(self.sfc_start, self.sfc_end + 1):
-            if mask[sfc - self.sfc_start] == 0: continue
-            status = artio_grid_read_root_cell_begin( handle, sfc, 
-                    dpos, grid_variables, &num_oct_levels,
-                    num_octs_per_level)
-            check_artio_status(status) 
+            if self.sfc_mask[sfc - self.sfc_start] == 0: continue
+            sfci += 1
+            if mask[sfci] == 0: continue
             for i in range(nf):
-                field_vals[i][filled] = grid_variables[field_ind[i]]
+                field_vals[i][filled] = mesh_data[field_ind[i]][
+                    sfc - self.sfc_start]
             filled += 1
-            status = artio_grid_read_root_cell_end( handle )
-            check_artio_status(status)
         # Now we have all our sources.
-        #status = artio_grid_clear_sfc_cache(handle)
-        #check_artio_status(status)
         free(field_ind)
         free(field_vals)
-        free(grid_variables)
-        free(num_octs_per_level)
         return tr
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
     def deposit(self, ParticleDepositOperation pdeposit,
                 SelectorObject selector,
                 np.ndarray[np.float64_t, ndim=2] positions,
@@ -1262,21 +1368,26 @@
         # This implements the necessary calls to enable particle deposition to
         # occur as needed.
         cdef int nf, i, j
+        cdef np.int64_t sfc, sfci
         if fields is None:
             fields = []
         nf = len(fields)
         cdef np.ndarray[np.uint8_t, ndim=1, cast=True] mask
         mask = self.mask(selector, -1)
         cdef np.ndarray[np.int64_t, ndim=1] domain_ind
-        domain_ind = np.zeros(mask.shape[0], dtype="int64") - 1
+        domain_ind = np.zeros(self.sfc_end - self.sfc_start + 1,
+                              dtype="int64") - 1
         j = 0
-        for i in range(mask.shape[0]):
-            if mask[i] == 1:
-                domain_ind[i] = j
-                j += 1
+        sfci = -1
+        for sfc in range(self.sfc_start, self.sfc_end + 1):
+            if self.sfc_mask[sfc - self.sfc_start] == 0: continue
+            sfci += 1
+            if mask[sfci] == 0: 
+                continue
+            domain_ind[sfc - self.sfc_start] = j
+            j += 1
         cdef np.float64_t **field_pointers, *field_vals, pos[3], left_edge[3]
         cdef int coords[3]
-        cdef np.int64_t sfc
         cdef np.ndarray[np.float64_t, ndim=1] tarr
         field_pointers = <np.float64_t**> alloca(sizeof(np.float64_t *) * nf)
         field_vals = <np.float64_t*>alloca(sizeof(np.float64_t) * nf)
@@ -1306,5 +1417,72 @@
                 for j in range(nf):
                     field_pointers[j][i] = field_vals[j] 
 
+cdef class SFCRangeSelector(SelectorObject):
+    
+    cdef SelectorObject base_selector
+    cdef ARTIOSFCRangeHandler range_handler
+    cdef ARTIORootMeshContainer mesh_container
+    cdef np.int64_t sfc_start, sfc_end
+
+    def __init__(self, dobj):
+        self.base_selector = dobj.base_selector
+        self.mesh_container = dobj.oct_handler
+        self.range_handler = self.mesh_container.range_handler
+        self.sfc_start = self.mesh_container.sfc_start
+        self.sfc_end = self.mesh_container.sfc_end
+    
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def select_grids(self,
+                     np.ndarray[np.float64_t, ndim=2] left_edges,
+                     np.ndarray[np.float64_t, ndim=2] right_edges,
+                     np.ndarray[np.int32_t, ndim=2] levels):
+        raise RuntimeError
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef int select_sphere(self, np.float64_t pos[3], np.float64_t radius) nogil:
+        return 1
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef int select_cell(self, np.float64_t pos[3], np.float64_t dds[3]) nogil:
+        return self.select_point(pos)
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef int select_point(self, np.float64_t pos[3]) nogil:
+        cdef np.int64_t sfc = self.mesh_container.pos_to_sfc(pos)
+        if sfc > self.sfc_end: return 0
+        cdef np.int64_t oc = self.range_handler.doct_count[
+            sfc - self.sfc_start]
+        if oc > 0: return 0
+        return 1
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef int select_bbox(self, np.float64_t left_edge[3],
+                               np.float64_t right_edge[3]) nogil:
+        return self.base_selector.select_bbox(left_edge, right_edge)
+    
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef int select_grid(self, np.float64_t left_edge[3],
+                         np.float64_t right_edge[3], np.int32_t level,
+                         Oct *o = NULL) nogil:
+        # Because visitors now use select_grid, we should be explicitly
+        # checking this.
+        return self.base_selector.select_grid(left_edge, right_edge, level, o)
+    
+    def _hash_vals(self):
+        return (hash(self.base_selector), self.sfc_start, self.sfc_end)
+
 sfc_subset_selector = AlwaysSelector
+#sfc_subset_selector = SFCRangeSelector
 

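A recurring pattern in the container changes above: mask() now caches both
the boolean mask and its sum, keyed on hash(selector), and icoords, fcoords,
fwidth, and ires reuse _last_mask_sum rather than re-summing on every call.
Reduced to a pure-Python sketch (class and helper names are illustrative):

    class MaskCache(object):
        def __init__(self, compute_mask):
            self._compute_mask = compute_mask  # expensive walk over the SFC range
            self._last_selector_id = -1
            self._last_mask = None
            self._last_mask_sum = 0

        def mask(self, selector):
            if self._last_selector_id == hash(selector):
                return self._last_mask         # cache hit: skip the walk entirely
            m = self._compute_mask(selector)
            self._last_mask = m
            self._last_mask_sum = m.sum()
            self._last_selector_id = hash(selector)
            return m
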
diff -r f21bb7349dfe102c0d5a18b0e5c1e6674a0f2989 -r 8293ecbdad842c0516ab332aee45fdfc0d24186e yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -77,6 +77,7 @@
         return self.sfc_end
 
     def fill(self, fields, selector):
+        if len(fields) == 0: return []
         handle = self.oct_handler.artio_handle
         field_indices = [handle.parameters["grid_variable_labels"].index(
                         yt_to_art[f]) for (ft, f) in fields]
@@ -93,6 +94,7 @@
         return tr
 
     def fill_particles(self, fields):
+        if len(fields) == 0: return {}
         art_fields = []
         for s, f in fields:
             fn = yt_to_art[f]
@@ -132,6 +134,7 @@
 
     def fill(self, fields, selector):
         # We know how big these will be.
+        if len(fields) == 0: return []
         handle = self.pf._handle
         field_indices = [handle.parameters["grid_variable_labels"].index(
                         yt_to_art[f]) for (ft, f) in fields]
@@ -172,6 +175,10 @@
         self.float_type = np.float64
         super(ARTIOGeometryHandler, self).__init__(pf, data_style)
 
+    @property
+    def max_range(self):
+        return self.parameter_file.max_range
+
     def _setup_geometry(self):
         mylog.debug("Initializing Geometry Handler empty for now.")
 
@@ -246,15 +253,18 @@
             nz = getattr(dobj, "_num_zones", 0)
             if all_data:
                 mylog.debug("Selecting entire artio domain")
-                list_sfc_ranges = self.pf._handle.root_sfc_ranges_all()
+                list_sfc_ranges = self.pf._handle.root_sfc_ranges_all(
+                    max_range_size = self.max_range)
             elif sfc_start is not None and sfc_end is not None:
                 mylog.debug("Restricting to %s .. %s", sfc_start, sfc_end)
                 list_sfc_ranges = [(sfc_start, sfc_end)]
             else:
                 mylog.debug("Running selector on artio base grid")
                 list_sfc_ranges = self.pf._handle.root_sfc_ranges(
-                    dobj.selector)
+                    dobj.selector, max_range_size = self.max_range)
             ci = []
+            #v = np.array(list_sfc_ranges)
+            #list_sfc_ranges = [ (v.min(), v.max()) ]
             for (start, end) in list_sfc_ranges:
                 range_handler = ARTIOSFCRangeHandler(
                     self.pf.domain_dimensions,
@@ -327,6 +337,7 @@
     _fieldinfo_known = KnownARTIOFields
     _particle_mass_name = "particle_mass"
     _particle_coordinates_name = "Coordinates"
+    max_range = 1024
 
     def __init__(self, filename, data_style='artio',
                  storage_filename=None):

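Since max_range is a plain class attribute surfaced through the new
ARTIOGeometryHandler.max_range property, the SFC range chunking can be tuned
without touching the frontend. A hedged sketch, where the value and dataset
path are arbitrary examples:

    from yt.frontends.artio.api import ARTIOStaticOutput

    ARTIOStaticOutput.max_range = 4096  # default is 1024, set on the class above
    pf = ARTIOStaticOutput("sizmbhloz-clref04SNth-rs9_a0.9011.art")  # example path
    dd = pf.h.all_data()                # root SFC ranges now come in larger chunks
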
diff -r f21bb7349dfe102c0d5a18b0e5c1e6674a0f2989 -r 8293ecbdad842c0516ab332aee45fdfc0d24186e yt/frontends/artio/tests/test_outputs.py
--- a/yt/frontends/artio/tests/test_outputs.py
+++ b/yt/frontends/artio/tests/test_outputs.py
@@ -19,7 +19,8 @@
     requires_pf, \
     data_dir_load, \
     PixelizedProjectionValuesTest, \
-    FieldValuesTest
+    FieldValuesTest, \
+    create_obj
 from yt.frontends.artio.api import ARTIOStaticOutput
 
 _fields = ("Temperature", "Density", "VelocityMagnitude",
@@ -31,12 +32,15 @@
     pf = data_dir_load(sizmbhloz)
     yield assert_equal, str(pf), "sizmbhloz-clref04SNth-rs9_a0.9011.art"
     dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
-    for field in _fields:
-        for axis in [0, 1, 2]:
-            for ds in dso:
+    for ds in dso:
+        for field in _fields:
+            for axis in [0, 1, 2]:
                 for weight_field in [None, "Density"]:
                     yield PixelizedProjectionValuesTest(
                         sizmbhloz, axis, field, weight_field,
                         ds)
-                yield FieldValuesTest(
-                        sizmbhloz, field, ds)
+            yield FieldValuesTest(sizmbhloz, field, ds)
+        dobj = create_obj(pf, ds)
+        s1 = dobj["Ones"].sum()
+        s2 = sum(mask.sum() for block, mask in dobj.blocks)
+        yield assert_equal, s1, s2

diff -r f21bb7349dfe102c0d5a18b0e5c1e6674a0f2989 -r 8293ecbdad842c0516ab332aee45fdfc0d24186e yt/frontends/chombo/tests/test_outputs.py
--- a/yt/frontends/chombo/tests/test_outputs.py
+++ b/yt/frontends/chombo/tests/test_outputs.py
@@ -29,6 +29,7 @@
     pf = data_dir_load(gc)
     yield assert_equal, str(pf), "data.0077.3d.hdf5"
     for test in small_patch_amr(gc, _fields):
+        test_gc.__name__ = test.description
         yield test
 
 tb = "TurbBoxLowRes/data.0005.3d.hdf5"
@@ -37,4 +38,5 @@
     pf = data_dir_load(tb)
     yield assert_equal, str(pf), "data.0005.3d.hdf5"
     for test in small_patch_amr(tb, _fields):
+        test_tb.__name__ = test.description
         yield test

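Assigning each yielded test's description to the generator's __name__ makes
nose report a distinct, meaningful name per case instead of repeating the
generator's own name; the same two lines are added to the enzo, flash, and
orion suites below. Schematically:

    def test_example():                  # a nose test generator (sketch)
        for test in small_patch_amr(gc, _fields):
            test_example.__name__ = test.description  # label this yielded case
            yield test
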
diff -r f21bb7349dfe102c0d5a18b0e5c1e6674a0f2989 -r 8293ecbdad842c0516ab332aee45fdfc0d24186e yt/frontends/enzo/tests/test_outputs.py
--- a/yt/frontends/enzo/tests/test_outputs.py
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -29,6 +29,7 @@
     pf = data_dir_load(m7)
     yield assert_equal, str(pf), "moving7_0010"
     for test in small_patch_amr(m7, _fields):
+        test_moving7.__name__ = test.description
         yield test
 
 g30 = "IsolatedGalaxy/galaxy0030/galaxy0030"
@@ -37,4 +38,5 @@
     pf = data_dir_load(g30)
     yield assert_equal, str(pf), "galaxy0030"
     for test in big_patch_amr(g30, _fields):
+        test_galaxy0030.__name__ = test.description
         yield test

diff -r f21bb7349dfe102c0d5a18b0e5c1e6674a0f2989 -r 8293ecbdad842c0516ab332aee45fdfc0d24186e yt/frontends/flash/tests/test_outputs.py
--- a/yt/frontends/flash/tests/test_outputs.py
+++ b/yt/frontends/flash/tests/test_outputs.py
@@ -29,6 +29,7 @@
     pf = data_dir_load(sloshing)
     yield assert_equal, str(pf), "sloshing_low_res_hdf5_plt_cnt_0300"
     for test in small_patch_amr(sloshing, _fields):
+        test_sloshing.__name__ = test.description
         yield test
 
 _fields_2d = ("Temperature", "Density")
@@ -39,4 +40,5 @@
     pf = data_dir_load(wt)
     yield assert_equal, str(pf), "windtunnel_4lev_hdf5_plt_cnt_0030"
     for test in small_patch_amr(wt, _fields_2d):
+        test_wind_tunnel.__name__ = test.description
         yield test

diff -r f21bb7349dfe102c0d5a18b0e5c1e6674a0f2989 -r 8293ecbdad842c0516ab332aee45fdfc0d24186e yt/frontends/moab/tests/test_c5.py
--- a/yt/frontends/moab/tests/test_c5.py
+++ b/yt/frontends/moab/tests/test_c5.py
@@ -49,7 +49,6 @@
             ray = pf.h.ray(p1, p2)
             yield assert_almost_equal, ray["dts"].sum(dtype="float64"), 1.0, 8
     for field in _fields:
-        for axis in [0, 1, 2]:
-            for ds in dso:
-                yield FieldValuesTest(c5, field, ds)
+        for ds in dso:
+            yield FieldValuesTest(c5, field, ds)
 

diff -r f21bb7349dfe102c0d5a18b0e5c1e6674a0f2989 -r 8293ecbdad842c0516ab332aee45fdfc0d24186e yt/frontends/orion/tests/test_outputs.py
--- a/yt/frontends/orion/tests/test_outputs.py
+++ b/yt/frontends/orion/tests/test_outputs.py
@@ -29,6 +29,7 @@
     pf = data_dir_load(radadvect)
     yield assert_equal, str(pf), "plt00000"
     for test in small_patch_amr(radadvect, _fields):
+        test_radadvect.__name__ = test.description
         yield test
 
 rt = "RadTube/plt00500"
@@ -37,4 +38,5 @@
     pf = data_dir_load(rt)
     yield assert_equal, str(pf), "plt00500"
     for test in small_patch_amr(rt, _fields):
+        test_radtube.__name__ = test.description
         yield test

diff -r f21bb7349dfe102c0d5a18b0e5c1e6674a0f2989 -r 8293ecbdad842c0516ab332aee45fdfc0d24186e yt/frontends/ramses/tests/test_outputs.py
--- a/yt/frontends/ramses/tests/test_outputs.py
+++ b/yt/frontends/ramses/tests/test_outputs.py
@@ -19,7 +19,8 @@
     requires_pf, \
     data_dir_load, \
     PixelizedProjectionValuesTest, \
-    FieldValuesTest
+    FieldValuesTest, \
+    create_obj
 from yt.frontends.artio.api import ARTIOStaticOutput
 
 _fields = ("Temperature", "Density", "VelocityMagnitude",
@@ -31,13 +32,15 @@
     pf = data_dir_load(output_00080)
     yield assert_equal, str(pf), "info_00080"
     dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
-    for field in _fields:
-        for axis in [0, 1, 2]:
-            for ds in dso:
+    for ds in dso:
+        for field in _fields:
+            for axis in [0, 1, 2]:
                 for weight_field in [None, "Density"]:
                     yield PixelizedProjectionValuesTest(
                         output_00080, axis, field, weight_field,
                         ds)
-                yield FieldValuesTest(
-                        output_00080, field, ds)
-
+            yield FieldValuesTest(output_00080, field, ds)
+        dobj = create_obj(pf, ds)
+        s1 = dobj["Ones"].sum()
+        s2 = sum(mask.sum() for block, mask in dobj.blocks)
+        yield assert_equal, s1, s2

diff -r f21bb7349dfe102c0d5a18b0e5c1e6674a0f2989 -r 8293ecbdad842c0516ab332aee45fdfc0d24186e yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -430,7 +430,10 @@
 
         self._unit_base = unit_base or {}
         self._cosmology_parameters = cosmology_parameters
+        if parameter_file is not None:
+            parameter_file = os.path.abspath(parameter_file)
         self._param_file = parameter_file
+        filename = os.path.abspath(filename)
         super(TipsyStaticOutput, self).__init__(filename, data_style)
 
     def __repr__(self):

diff -r f21bb7349dfe102c0d5a18b0e5c1e6674a0f2989 -r 8293ecbdad842c0516ab332aee45fdfc0d24186e yt/frontends/sph/tests/test_owls.py
--- a/yt/frontends/sph/tests/test_owls.py
+++ b/yt/frontends/sph/tests/test_owls.py
@@ -21,7 +21,8 @@
     big_patch_amr, \
     data_dir_load, \
     PixelizedProjectionValuesTest, \
-    FieldValuesTest
+    FieldValuesTest, \
+    create_obj
 from yt.frontends.sph.api import OWLSStaticOutput
 
 _fields = (("deposit", "all_density"), ("deposit", "all_count"),
@@ -40,13 +41,15 @@
     tot = sum(dd[ptype,"Coordinates"].shape[0]
               for ptype in pf.particle_types if ptype != "all")
     yield assert_equal, tot, (2*128*128*128)
-    for field in _fields:
-        for axis in [0, 1, 2]:
-            for ds in dso:
+    for ds in dso:
+        for field in _fields:
+            for axis in [0, 1, 2]:
                 for weight_field in [None, "Density"]:
                     yield PixelizedProjectionValuesTest(
                         os33, axis, field, weight_field,
                         ds)
-                yield FieldValuesTest(
-                        os33, field, ds)
-
+            yield FieldValuesTest(os33, field, ds)
+        dobj = create_obj(pf, ds)
+        s1 = dobj["Ones"].sum()
+        s2 = sum(mask.sum() for block, mask in dobj.blocks)
+        yield assert_equal, s1, s2

diff -r f21bb7349dfe102c0d5a18b0e5c1e6674a0f2989 -r 8293ecbdad842c0516ab332aee45fdfc0d24186e yt/frontends/sph/tests/test_tipsy.py
--- /dev/null
+++ b/yt/frontends/sph/tests/test_tipsy.py
@@ -0,0 +1,95 @@
+"""
+Tipsy tests using the AGORA dataset
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.testing import *
+from yt.utilities.answer_testing.framework import \
+    requires_pf, \
+    small_patch_amr, \
+    big_patch_amr, \
+    data_dir_load, \
+    PixelizedProjectionValuesTest, \
+    FieldValuesTest, \
+    create_obj
+from yt.frontends.sph.api import TipsyStaticOutput
+
+_fields = (("deposit", "all_density"),
+           ("deposit", "all_count"),
+           ("deposit", "DarkMatter_density"),
+)
+
+pkdgrav = "halo1e11_run1.00400/halo1e11_run1.00400"
+@requires_pf(pkdgrav, file_check = True)
+def test_pkdgrav():
+    cosmology_parameters = dict(current_redshift = 0.0,
+                                omega_lambda = 0.728,
+                                omega_matter = 0.272,
+                                hubble_constant = 0.702)
+    kwargs = dict(endian="<",
+                  field_dtypes = {"Coordinates": "d"},
+                  cosmology_parameters = cosmology_parameters,
+                  unit_base = {'mpchcm': 1.0/60.0},
+                  n_ref = 64)
+    pf = data_dir_load(pkdgrav, TipsyStaticOutput, (), kwargs)
+    yield assert_equal, str(pf), "halo1e11_run1.00400"
+    dso = [ None, ("sphere", ("c", (0.3, 'unitary')))]
+    dd = pf.h.all_data()
+    yield assert_equal, dd["Coordinates"].shape, (26847360, 3)
+    tot = sum(dd[ptype,"Coordinates"].shape[0]
+              for ptype in pf.particle_types if ptype != "all")
+    yield assert_equal, tot, 26847360
+    for ds in dso:
+        for field in _fields:
+            for axis in [0, 1, 2]:
+                for weight_field in [None, "Density"]:
+                    yield PixelizedProjectionValuesTest(
+                        pf, axis, field, weight_field,
+                        ds)
+            yield FieldValuesTest(pf, field, ds)
+        dobj = create_obj(pf, ds)
+        s1 = dobj["Ones"].sum()
+        s2 = sum(mask.sum() for block, mask in dobj.blocks)
+        yield assert_equal, s1, s2
+
+gasoline = "agora_1e11.00400/agora_1e11.00400"
+@requires_pf(gasoline, file_check = True)
+def test_gasoline():
+    cosmology_parameters = dict(current_redshift = 0.0,
+                                omega_lambda = 0.728,
+                                omega_matter = 0.272,
+                                hubble_constant = 0.702)
+    kwargs = dict(cosmology_parameters = cosmology_parameters,
+                  unit_base = {'mpchcm': 1.0/60.0},
+                  n_ref = 64)
+    pf = data_dir_load(gasoline, TipsyStaticOutput, (), kwargs)
+    yield assert_equal, str(pf), "agora_1e11.00400"
+    dso = [ None, ("sphere", ("c", (0.3, 'unitary')))]
+    dd = pf.h.all_data()
+    yield assert_equal, dd["Coordinates"].shape, (10550576, 3)
+    tot = sum(dd[ptype,"Coordinates"].shape[0]
+              for ptype in pf.particle_types if ptype != "all")
+    yield assert_equal, tot, 10550576
+    for ds in dso:
+        for field in _fields:
+            for axis in [0, 1, 2]:
+                for weight_field in [None, "Density"]:
+                    yield PixelizedProjectionValuesTest(
+                        pf, axis, field, weight_field,
+                        ds)
+            yield FieldValuesTest(pf, field, ds)
+        dobj = create_obj(pf, ds)
+        s1 = dobj["Ones"].sum()
+        s2 = sum(mask.sum() for block, mask in dobj.blocks)
+        yield assert_equal, s1, s2
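
For readers following along, the `data_dir_load(gasoline, TipsyStaticOutput, (), kwargs)` call above amounts to constructing the frontend directly. A hedged equivalent, assuming data_dir_load simply forwards its args and kwargs to the constructor (the path is the AGORA sample the test uses):

    from yt.frontends.sph.api import TipsyStaticOutput

    cosmology_parameters = dict(current_redshift = 0.0,
                                omega_lambda = 0.728,
                                omega_matter = 0.272,
                                hubble_constant = 0.702)
    pf = TipsyStaticOutput("agora_1e11.00400/agora_1e11.00400",
                           cosmology_parameters = cosmology_parameters,
                           unit_base = {'mpchcm': 1.0/60.0},
                           n_ref = 64)
    dd = pf.h.all_data()
    print dd["Coordinates"].shape   # (10550576, 3) for this dataset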

diff -r f21bb7349dfe102c0d5a18b0e5c1e6674a0f2989 -r 8293ecbdad842c0516ab332aee45fdfc0d24186e yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -409,11 +409,11 @@
     def mask(self, SelectorObject selector, np.int64_t num_cells = -1,
              int domain_id = -1):
         if num_cells == -1:
-            num_cells = selector.count_oct_cells(self, domain_id)
+            num_cells = selector.count_octs(self, domain_id)
         cdef np.ndarray[np.uint8_t, ndim=1] coords
         cdef OctVisitorData data
         self.setup_data(&data, domain_id)
-        coords = np.zeros((num_cells), dtype="uint8")
+        coords = np.zeros((num_cells*8), dtype="uint8")
         data.array = <void *> coords.data
         self.visit_all_octs(selector, oct_visitors.mask_octs, &data)
         return coords.astype("bool")

diff -r f21bb7349dfe102c0d5a18b0e5c1e6674a0f2989 -r 8293ecbdad842c0516ab332aee45fdfc0d24186e yt/gui/reason/extdirect_router.py
--- a/yt/gui/reason/extdirect_router.py
+++ b/yt/gui/reason/extdirect_router.py
@@ -9,6 +9,13 @@
 This code was released under the BSD License.
 """
 
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
 import inspect
 
 class DirectException(Exception):
@@ -186,12 +193,4 @@
 
 
 
-"""
 
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------

diff -r f21bb7349dfe102c0d5a18b0e5c1e6674a0f2989 -r 8293ecbdad842c0516ab332aee45fdfc0d24186e yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -14,6 +14,7 @@
 
 import itertools as it
 import numpy as np
+import importlib
 from yt.funcs import *
 from numpy.testing import assert_array_equal, assert_almost_equal, \
     assert_approx_equal, assert_array_almost_equal, assert_equal, \
@@ -252,3 +253,23 @@
                     list_of_kwarg_dicts[i][key] = keywords[key][0]
 
     return list_of_kwarg_dicts
+
+def requires_module(module):
+    """
+    Decorator that takes a module name as an argument and tries to import it.
+    If the module imports without issue, the function is returned, but if not, 
+    a null function is returned. This is so tests that depend on certain modules
+    being imported will not fail if the module is not installed on the testing
+    platform.
+    """
+    def ffalse(func):
+        return lambda: None
+    def ftrue(func):
+        return func
+    try:
+        importlib.import_module(module)
+    except ImportError:
+        return ffalse
+    else:
+        return ftrue
+    

This diff is so big that we needed to truncate the remainder.
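
The `requires_module` decorator added to yt/testing.py above is used as a plain guard around a test function; a minimal sketch (the module name "h5py" is just an illustrative choice):

    from yt.testing import requires_module

    @requires_module("h5py")
    def test_needs_h5py():
        # Runs only when h5py is importable; otherwise the decorator
        # has already swapped the test for a no-op lambda, so nothing
        # fails on machines without the dependency.
        import h5py
        assert h5py.File is not None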

https://bitbucket.org/yt_analysis/yt-3.0/commits/01ec069b1a54/
Changeset:   01ec069b1a54
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-09 19:48:13
Summary:     Update RAMSES IO to use new particle IO
Affected #:  3 files

diff -r 8293ecbdad842c0516ab332aee45fdfc0d24186e -r 01ec069b1a54d77c2b782df183ab9449cef443f3 yt/frontends/artio/io.py
--- a/yt/frontends/artio/io.py
+++ b/yt/frontends/artio/io.py
@@ -56,9 +56,8 @@
                                  for fname in field_list]
         for ptype, field_list in sorted(ptf.items()):
             for ax in 'xyz':
-                pp = "particle_position_%s" % ax
                 if pp not in field_list:
-                    fields.append((ptype, pp))
+                    fields.append((ptype, pn % ax))
         for chunk in chunks: # These should be organized by grid filename
             for subset in chunk.objs:
                 rv = dict(**subset.fill_particles(fields))

diff -r 8293ecbdad842c0516ab332aee45fdfc0d24186e -r 01ec069b1a54d77c2b782df183ab9449cef443f3 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -153,8 +153,8 @@
         _pfields = {}
         for field, vtype in particle_fields:
             if f.tell() >= flen: break
-            field_offsets["all", field] = f.tell()
-            _pfields["all", field] = vtype
+            field_offsets["io", field] = f.tell()
+            _pfields["io", field] = vtype
             fpu.skip(f, 1)
         self.particle_field_offsets = field_offsets
         self.particle_field_types = _pfields

diff -r 8293ecbdad842c0516ab332aee45fdfc0d24186e -r 01ec069b1a54d77c2b782df183ab9449cef443f3 yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -49,40 +49,37 @@
             d[field] = np.concatenate(tr.pop(field))
         return d
 
-    def _read_particle_selection(self, chunks, selector, fields):
-        size = 0
-        masks = {}
-        chunks = list(chunks)
-        pos_fields = [("all","particle_position_%s" % ax) for ax in "xyz"]
+    def _read_particle_coords(self, chunks, ptf):
+        fields = [(ptype, "particle_position_%s" % ax)
+                  for ptype, field_list in ptf.items()
+                  for ax in 'xyz']
         for chunk in chunks:
             for subset in chunk.objs:
-                # We read the whole thing, then feed it back to the selector
-                selection = self._read_particle_subset(subset, pos_fields)
-                mask = selector.select_points(
-                    selection["all", "particle_position_x"],
-                    selection["all", "particle_position_y"],
-                    selection["all", "particle_position_z"])
-                if mask is None: continue
-                #print "MASK", mask
-                size += mask.sum()
-                masks[id(subset)] = mask
-        # Now our second pass
-        tr = {}
-        pos = 0
+                rv = self._read_particle_subset(subset, fields)
+                for ptype in sorted(ptf):
+                    yield ptype, (rv[ptype, pn % 'x'],
+                                  rv[ptype, pn % 'y'],
+                                  rv[ptype, pn % 'z'])
+
+    def _read_particle_fields(self, chunks, ptf, selector):
+        pn = "particle_position_%s"
+        chunks = list(chunks)
+        fields = [(ptype, fname) for ptype, field_list in ptf.items()
+                                 for fname in field_list]
+        for ptype, field_list in sorted(ptf.items()):
+            for ax in 'xyz':
+                if pp not in field_list:
+                    fields.append((ptype, pn % ax))
         for chunk in chunks:
             for subset in chunk.objs:
-                selection = self._read_particle_subset(subset, fields)
-                mask = masks.pop(id(subset), None)
-                if mask is None: continue
-                count = mask.sum()
-                for field in fields:
-                    ti = selection.pop(field)[mask]
-                    if field not in tr:
-                        dt = subset.domain.particle_field_types[field]
-                        tr[field] = np.empty(size, dt)
-                    tr[field][pos:pos+count] = ti
-                pos += count
-        return tr
+                rv = self._read_particle_subset(subset, fields)
+                for ptype, field_list in sorted(ptf.items()):
+                    x, y, z = (np.asarray(rv[ptype, pn % ax], "=f8")
+                               for ax in 'xyz')
+                    mask = selector.select_points(x, y, z)
+                    for field in field_list:
+                        yield ptype, rv[ptype, field]
+                    rv.pop(ptype)
 
     def _read_particle_subset(self, subset, fields):
         f = open(subset.domain.part_fn, "rb")
@@ -91,7 +88,7 @@
         # We do *all* conversion into boxlen here.
         # This means that no other conversions need to be applied to convert
         # positions into the same domain as the octs themselves.
-        for field in fields:
+        for field in sorted(fields, key = lambda a: foffsets[a]):
             f.seek(foffsets[field])
             dt = subset.domain.particle_field_types[field]
             tr[field] = fpu.read_vector(f, dt)


https://bitbucket.org/yt_analysis/yt-3.0/commits/21a50996fdd6/
Changeset:   21a50996fdd6
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-09 20:10:54
Summary:     Converted RAMSES to use different particle types.
Affected #:  4 files

diff -r 01ec069b1a54d77c2b782df183ab9449cef443f3 -r 21a50996fdd6a115585fe5006770c4c3f6013ed3 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -36,11 +36,6 @@
 
 import yt.utilities.lib as amr_utils
 
-def _check_ftype(field):
-    if isinstance(field.name, tuple):
-        return field.name[0]
-    return "all"
-
 EnzoFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo, "EFI")
 add_field = EnzoFieldInfo.add_field
 
@@ -558,28 +553,22 @@
            data.pf.units["cm"]**3.0
 
 def _setup_particle_fields(registry, ptype):
-    df = []
-    df += particle_vector_functions(ptype,
+    particle_vector_functions(ptype,
             ["particle_position_%s" % ax for ax in 'xyz'],
             ["particle_velocity_%s" % ax for ax in 'xyz'],
             registry)
-    df += particle_deposition_functions(ptype,
-            "Coordinates", "particle_mass",
-            registry)
+    particle_deposition_functions(ptype, "Coordinates",
+        "particle_mass", registry)
 
     for ax in 'xyz':
         fn = "particle_velocity_%s" % ax
         cfunc = _get_vel_convert(ax)
-        df.append((ptype, fn))
         registry.add_field((ptype, fn), function=NullFunc,
                   convert_function=_get_vel_convert(ax),
                   particle_type=True)
     for fn in ["creation_time", "dynamical_time", "metallicity_fraction"] + \
               ["particle_position_%s" % ax for ax in 'xyz']:
-        df.append((ptype, fn))
         registry.add_field((ptype, fn), function=NullFunc, particle_type=True)
 
     registry.add_field((ptype, "particle_mass"), function=NullFunc, 
               particle_type=True, convert_function = _convertParticleMass)
-    df.append((ptype, "particle_mass"))
-    return df

diff -r 01ec069b1a54d77c2b782df183ab9449cef443f3 -r 21a50996fdd6a115585fe5006770c4c3f6013ed3 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -43,7 +43,8 @@
 from .fields import \
     RAMSESFieldInfo, \
     KnownRAMSESFields, \
-    create_cooling_fields
+    create_cooling_fields, \
+    _setup_particle_fields
 
 class RAMSESDomainFile(object):
     _last_mask = None
@@ -158,6 +159,7 @@
             fpu.skip(f, 1)
         self.particle_field_offsets = field_offsets
         self.particle_field_types = _pfields
+        self.particle_types = self.particle_types_raw = ("io",)
 
     def _read_amr_header(self):
         hvals = {}
@@ -506,6 +508,11 @@
         self.hubble_constant = rheader["H0"] / 100.0 # This is H100
         self.max_level = rheader['levelmax'] - self.min_level
 
+    def _setup_particle_type(self, ptype):
+        orig = set(self.field_info.keys())
+        _setup_particle_fields(self.field_info, ptype)
+        return list(set(self.field_info.keys()).difference(orig))
+
     @classmethod
     def _is_valid(self, *args, **kwargs):
         if not os.path.basename(args[0]).startswith("info_"): return False

diff -r 01ec069b1a54d77c2b782df183ab9449cef443f3 -r 21a50996fdd6a115585fe5006770c4c3f6013ed3 yt/frontends/ramses/fields.py
--- a/yt/frontends/ramses/fields.py
+++ b/yt/frontends/ramses/fields.py
@@ -87,38 +87,38 @@
     return data.convert("x-velocity")
 for ax in ['x','y','z']:
     f = KnownRAMSESFields["%s-velocity" % ax]
-    f._units = r"\rm{cm}/\rm{s}"
     f._convert_function = _convertVelocity
     f.take_log = False
 
-known_ramses_particle_fields = [
-    "particle_position_x",
-    "particle_position_y",
-    "particle_position_z",
-    "particle_velocity_x",
-    "particle_velocity_y",
-    "particle_velocity_z",
-    "particle_mass",
-    "particle_identifier",
-    "particle_refinement_level",
-    "particle_age",
-    "particle_metallicity",
-]
-
-for f in known_ramses_particle_fields:
-    add_ramses_field(("all", f), function=NullFunc, take_log=True,
-              particle_type = True)
-
-for ax in 'xyz':
-    KnownRAMSESFields["all", "particle_velocity_%s" % ax]._convert_function = \
-        _convertVelocity
-
 def _convertParticleMass(data):
     return data.convert("mass")
 
-KnownRAMSESFields["all", "particle_mass"]._convert_function = \
-        _convertParticleMass
-KnownRAMSESFields["all", "particle_mass"]._units = r"\mathrm{g}"
+def _setup_particle_fields(registry, ptype):
+    particle_vector_functions(ptype,
+            ["particle_position_%s" % ax for ax in 'xyz'],
+            ["particle_velocity_%s" % ax for ax in 'xyz'],
+            registry)
+    particle_deposition_functions(ptype, "Coordinates",
+        "particle_mass", registry)
+
+    for ax in 'xyz':
+        fn = "particle_velocity_%s" % ax
+        registry.add_field((ptype, fn), function=NullFunc,
+                  convert_function=_convertVelocity,
+                  units = r"\rm{cm}/\rm{s}",
+                  take_log = False,
+                  particle_type=True)
+    for fn in ["particle_position_%s" % ax for ax in 'xyz'] + \
+              ["particle_identifier", "particle_refinement_level",
+               "particle_age", "particle_metallicity"]:
+        registry.add_field((ptype, fn), function=NullFunc, particle_type=True)
+
+    registry.add_field((ptype, "particle_index"),
+      function=TranslationFunc((ptype, "particle_identifier")),
+      particle_type = True)
+    registry.add_field((ptype, "particle_mass"), function=NullFunc, 
+              particle_type=True, convert_function = _convertParticleMass)
+    return
 
 def _Temperature(field, data):
     rv = data["Pressure"]/data["Density"]
@@ -188,12 +188,6 @@
               validators=ValidateDataField("%s_Density" % species),
               display_name="%s\/Mass" % species)
 
-# PARTICLE FIELDS
-particle_vector_functions("all", ["particle_position_%s" % ax for ax in 'xyz'],
-                                 ["particle_velocity_%s" % ax for ax in 'xyz'],
-                          RAMSESFieldInfo)
-particle_deposition_functions("all", "Coordinates", "particle_mass",
-                               RAMSESFieldInfo)
 _cool_axes = ("lognH", "logT", "logTeq")
 _cool_arrs = ("metal", "cool", "heat", "metal_prime", "cool_prime",
               "heat_prime", "mu", "abundances")

diff -r 01ec069b1a54d77c2b782df183ab9449cef443f3 -r 21a50996fdd6a115585fe5006770c4c3f6013ed3 yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -50,6 +50,7 @@
         return d
 
     def _read_particle_coords(self, chunks, ptf):
+        pn = "particle_position_%s"
         fields = [(ptype, "particle_position_%s" % ax)
                   for ptype, field_list in ptf.items()
                   for ax in 'xyz']
@@ -68,7 +69,7 @@
                                  for fname in field_list]
         for ptype, field_list in sorted(ptf.items()):
             for ax in 'xyz':
-                if pp not in field_list:
+                if pn % ax not in field_list:
                     fields.append((ptype, pn % ax))
         for chunk in chunks:
             for subset in chunk.objs:

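With this change RAMSES registers its native particles under the "io" type, with particle_types_raw == ("io",), and the "all" union is layered on top of it. Field access then looks like the following hedged sketch (the dataset path is hypothetical):

    from yt.mods import load

    pf = load("output_00101/info_00101.txt")   # hypothetical RAMSES output
    dd = pf.h.all_data()
    # Native fields are now ("io", ...) tuples rather than ("all", ...);
    # "all" is the union assembled from particle_types_raw.
    pos_x = dd["io", "particle_position_x"]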

https://bitbucket.org/yt_analysis/yt-3.0/commits/901a4073c707/
Changeset:   901a4073c707
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-09 20:29:13
Summary:     Fixing a few problems with particle IO.
Affected #:  3 files

diff -r 21a50996fdd6a115585fe5006770c4c3f6013ed3 -r 901a4073c7076b129119306d599a1958ef442865 yt/frontends/artio/io.py
--- a/yt/frontends/artio/io.py
+++ b/yt/frontends/artio/io.py
@@ -56,7 +56,7 @@
                                  for fname in field_list]
         for ptype, field_list in sorted(ptf.items()):
             for ax in 'xyz':
-                if pp not in field_list:
+                if pn % ax not in field_list:
                     fields.append((ptype, pn % ax))
         for chunk in chunks: # These should be organized by grid filename
             for subset in chunk.objs:
@@ -65,7 +65,8 @@
                     x, y, z = (np.asarray(rv[ptype][pn % ax], dtype="=f8")
                                for ax in 'xyz')
                     mask = selector.select_points(x, y, z)
+                    if mask is None: continue
                     for field in field_list:
                         data = np.asarray(rv[ptype][field], "=f8")
-                        yield (ptype, field), data
+                        yield (ptype, field), data[mask]
                     rv.pop(ptype)

diff -r 21a50996fdd6a115585fe5006770c4c3f6013ed3 -r 901a4073c7076b129119306d599a1958ef442865 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -102,7 +102,7 @@
                         data = np.asarray(pds.get(field).value, "=f8")
                         if field in _convert_mass:
                             data *= g.dds.prod(dtype="f8")
-                        yield (ptype, field), data
+                        yield (ptype, field), data[mask]
             f.close()
 
     def _read_fluid_selection(self, chunks, selector, fields, size):

diff -r 21a50996fdd6a115585fe5006770c4c3f6013ed3 -r 901a4073c7076b129119306d599a1958ef442865 yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -79,8 +79,8 @@
                                for ax in 'xyz')
                     mask = selector.select_points(x, y, z)
                     for field in field_list:
-                        yield ptype, rv[ptype, field]
-                    rv.pop(ptype)
+                        data = np.asarray(rv.pop((ptype, field))[mask], "=f8")
+                        yield (ptype, field), data
 
     def _read_particle_subset(self, subset, fields):
         f = open(subset.domain.part_fn, "rb")

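With that fix in place, `_read_particle_fields` yields ((ptype, field), data) with the selector mask already applied, while `_read_particle_coords` yields (ptype, (x, y, z)) tuples for counting. A hedged sketch of the consuming side; the real driver lives in yt's base IO handler, and `read_particles` here is a hypothetical stand-in:

    import numpy as np

    def read_particles(io, chunks, ptf, selector):
        # Each yield is already masked, so the caller only needs to
        # concatenate the per-subset arrays for each (ptype, field).
        tr = {}
        for (ptype, field), vals in io._read_particle_fields(
                list(chunks), ptf, selector):
            tr.setdefault((ptype, field), []).append(vals)
        return dict((k, np.concatenate(v)) for k, v in tr.items())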

https://bitbucket.org/yt_analysis/yt-3.0/commits/4646f473ff40/
Changeset:   4646f473ff40
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-09 20:47:17
Summary:     Convert Gadget Binary to new particle format.
Affected #:  1 file

diff -r 901a4073c7076b129119306d599a1958ef442865 -r 4646f473ff40ef3c1c0fc31beb8c87deb6560590 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -187,28 +187,28 @@
     _fields = ( "Coordinates",
                 "Velocities",
                 "ParticleIDs",
-                ("Masses", ZeroMass),
+                "Masses",
                 ("InternalEnergy", "Gas"),
                 ("Density", "Gas"),
                 ("SmoothingLength", "Gas"),
     )
 
+    _var_mass = None
+
+    @property
+    def var_mass(self):
+        if self._var_mass is None:
+            vm = []
+            for i, v in enumerate(self.pf["Massarr"]):
+                if v == 0:
+                    vm.append(self._ptypes[i])
+            self._var_mass = tuple(vm)
+        return self._var_mass
+
     def _read_fluid_selection(self, chunks, selector, fields, size):
         raise NotImplementedError
 
-    def _read_particle_selection(self, chunks, selector, fields):
-        rv = {}
-        # We first need a set of masks for each particle type
-        ptf = defaultdict(list)
-        ptall = []
-        psize = defaultdict(lambda: 0)
-        chunks = list(chunks)
-        ptypes = set()
-        for ftype, fname in fields:
-            ptf[ftype].append(fname)
-            ptypes.add(ftype)
-        ptypes = list(ptypes)
-        ptypes.sort(key = lambda a: self._ptypes.index(a))
+    def _read_particle_coords(self, chunks, ptf):
         data_files = set([])
         for chunk in chunks:
             for obj in chunk.objs:
@@ -217,23 +217,19 @@
             poff = data_file.field_offsets
             tp = data_file.total_particles
             f = open(data_file.filename, "rb")
-            for ptype in ptypes:
+            for ptype in ptf:
+                # This is where we could implement sub-chunking
                 f.seek(poff[ptype, "Coordinates"], os.SEEK_SET)
                 pos = self._read_field_from_file(f,
                             tp[ptype], "Coordinates")
-                psize[ptype] += selector.count_points(
-                    pos[:,0], pos[:,1], pos[:,2])
-                del pos
+                yield ptype, (pos[:,0], pos[:,1], pos[:,2])
             f.close()
-        ind = {}
-        for field in fields:
-            mylog.debug("Allocating %s values for %s", psize[field[0]], field)
-            if field[1] in self._vector_fields:
-                shape = (psize[field[0]], 3)
-            else:
-                shape = psize[field[0]]
-            rv[field] = np.empty(shape, dtype="float64")
-            ind[field] = 0
+
+    def _read_particle_fields(self, chunks, ptf, selector):
+        data_files = set([])
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
         for data_file in data_files:
             poff = data_file.field_offsets
             tp = data_file.total_particles
@@ -247,16 +243,18 @@
                 del pos
                 if mask is None: continue
                 for field in field_list:
+                    if field == "Masses" and ptype not in self.var_mass:
+                        data = np.empty(mask.sum(), dtype="float64")
+                        m = self.pf.parameters["Massarr"][
+                            self._ptypes.index(ptype)]
+                        data[:] = m
+                        yield (ptype, field), data
+                        continue
                     f.seek(poff[ptype, field], os.SEEK_SET)
                     data = self._read_field_from_file(f, tp[ptype], field)
-                    data = data[mask]
-                    my_ind = ind[ptype, field]
-                    mylog.debug("Filling (%s, %s) from %s to %s",
-                        ptype, field, my_ind, my_ind+data.shape[0])
-                    rv[ptype, field][my_ind:my_ind + data.shape[0],...] = data
-                    ind[ptype, field] += data.shape[0]
+                    data = data[mask,...]
+                    yield (ptype, field), data
             f.close()
-        return rv
 
     def _read_field_from_file(self, f, count, name):
         if count == 0: return
@@ -309,6 +307,8 @@
                 continue
             pos += 4
             for ptype in self._ptypes:
+                if field == "Masses" and ptype not in self.var_mass:
+                    continue
                 if (ptype, field) not in field_list:
                     continue
                 offsets[(ptype, field)] = pos

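The new `var_mass` property encodes the Gadget header convention: a zero entry in Massarr means that particle type stores per-particle masses on disk, while a nonzero entry is a constant filled in at read time. The logic in isolation, with hypothetical header values:

    _ptypes = ("Gas", "Halo", "Disk", "Bulge", "Stars", "Bndry")
    massarr = [0.0, 0.1, 0.0, 0.0, 0.0, 0.0]   # hypothetical Massarr

    # Types with Massarr == 0 carry an explicit Masses block in the
    # snapshot; everything else gets the constant broadcast at read time.
    var_mass = tuple(pt for pt, m in zip(_ptypes, massarr) if m == 0.0)
    assert var_mass == ("Gas", "Disk", "Bulge", "Stars", "Bndry")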

https://bitbucket.org/yt_analysis/yt-3.0/commits/e2b85937ee6a/
Changeset:   e2b85937ee6a
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-09 20:52:26
Summary:     Set default FieldValues decimals to 10.
Affected #:  1 file

diff -r 4646f473ff40ef3c1c0fc31beb8c87deb6560590 -r e2b85937ee6ad1e4b407946b9681b34ed8714253 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -370,7 +370,7 @@
     _attrs = ("field", )
 
     def __init__(self, pf_fn, field, obj_type = None,
-                 decimals = None):
+                 decimals = 10):
         super(FieldValuesTest, self).__init__(pf_fn)
         self.obj_type = obj_type
         self.field = field


https://bitbucket.org/yt_analysis/yt-3.0/commits/29a6481d1810/
Changeset:   29a6481d1810
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-09 22:03:28
Summary:     Fixing SZ projection argument order.
Affected #:  1 file

diff -r e2b85937ee6ad1e4b407946b9681b34ed8714253 -r 29a6481d181040a9784f97714c30c5dd5d11ae84 yt/analysis_modules/sunyaev_zeldovich/projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -128,7 +128,7 @@
             return vpar/clight
         add_field("BetaPar", function=_beta_par)    
 
-        proj = self.pf.h.proj(axis, "Density", data_source=source)
+        proj = self.pf.h.proj("Density", axis, data_source=source)
         proj.set_field_parameter("axis", axis)
         frb = proj.to_frb(width, nx)
         dens = frb["Density"]

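The fix tracks the yt-3.0 projection signature, which takes the field first and the axis second, the reverse of the older proj(axis, field) ordering; schematically (pf and source assumed already defined, as in the surrounding method):

    # yt-3.0 ordering: field name first, then the integer axis (0-2).
    proj = pf.h.proj("Density", 2, data_source=source)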

https://bitbucket.org/yt_analysis/yt-3.0/commits/e5435babfca0/
Changeset:   e5435babfca0
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-10 00:09:55
Summary:     Fixes for particle types.
Affected #:  3 files

diff -r 29a6481d181040a9784f97714c30c5dd5d11ae84 -r e5435babfca024ee65a57bf1d44e739ae27f85a1 yt/frontends/artio/fields.py
--- a/yt/frontends/artio/fields.py
+++ b/yt/frontends/artio/fields.py
@@ -257,6 +257,20 @@
               units=r"\rm{g}",
               particle_type=True)
 
+    registry.add_field((ptype, "particle_mass_initial"),
+              function=NullFunc,
+              convert_function=_convertParticleMass,
+              units=r"\rm{g}",
+              particle_type=True)
+
+    for field in ["particle_index",
+                  "creation_time",
+                  "particle_species",
+                  "particle_metallicity1",
+                  "particle_metallicity2"]:
+        registry.add_field((ptype, field),
+            function=NullFunc, particle_type=True)
+
     particle_vector_functions(ptype,
         ["particle_position_%s" % ax for ax in 'xyz'],
         ["particle_velocity_%s" % ax for ax in 'xyz'],

diff -r 29a6481d181040a9784f97714c30c5dd5d11ae84 -r e5435babfca024ee65a57bf1d44e739ae27f85a1 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -567,6 +567,7 @@
                   convert_function=_get_vel_convert(ax),
                   particle_type=True)
     for fn in ["creation_time", "dynamical_time", "metallicity_fraction"] + \
+              ["particle_type"] + \
               ["particle_position_%s" % ax for ax in 'xyz']:
         registry.add_field((ptype, fn), function=NullFunc, particle_type=True)
 

diff -r 29a6481d181040a9784f97714c30c5dd5d11ae84 -r e5435babfca024ee65a57bf1d44e739ae27f85a1 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -35,6 +35,7 @@
     _base = slice(None)
 
     def _read_field_names(self, grid):
+        if grid.filename is None: return []
         f = h5py.File(grid.filename, "r")
         group = f["/Grid%08i" % grid.id]
         fields = []
@@ -58,6 +59,7 @@
         for chunk in chunks: # These should be organized by grid filename
             f = None
             for g in chunk.objs:
+                if g.filename is None: continue
                 if f is None:
                     #print "Opening (count) %s" % g.filename
                     f = h5py.File(g.filename, "r")
@@ -81,6 +83,7 @@
         for chunk in chunks: # These should be organized by grid filename
             f = None
             for g in chunk.objs:
+                if g.filename is None: continue
                 if f is None:
                     #print "Opening (read) %s" % g.filename
                     f = h5py.File(g.filename, "r")
@@ -133,6 +136,7 @@
         for chunk in chunks:
             fid = None
             for g in chunk.objs:
+                if g.filename is None: continue
                 if fid is None:
                     fid = h5py.h5f.open(g.filename, h5py.h5f.ACC_RDONLY)
                 data = np.empty(g.ActiveDimensions[::-1], dtype="float64")


https://bitbucket.org/yt_analysis/yt-3.0/commits/d75989c329c6/
Changeset:   d75989c329c6
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-10 15:53:20
Summary:     Considerable number of fixes for particle type fields.
Affected #:  5 files

diff -r e5435babfca024ee65a57bf1d44e739ae27f85a1 -r d75989c329c6ce1d8ec9c141ef881ee77da4cf8f yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -222,6 +222,7 @@
                 self, data_style=self.data_style)
             # Now we do things that we need an instantiated hierarchy for
             if "all" not in self.particle_types:
+                mylog.debug("Creating Particle Union 'all'")
                 pu = ParticleUnion("all", list(self.particle_types_raw))
                 self.add_particle_union(pu)
         return self._instantiated_hierarchy
@@ -336,8 +337,10 @@
         self.particle_unions[union.name] = union
         fields = [ (union.name, field) for field in fields]
         self.h.field_list.extend(fields)
-        self.h._setup_unknown_fields(fields)
+        # Give ourselves a chance to add them here, first, then...
+        # ...if we can't find them, we set them up as defaults.
         self.h._setup_particle_types([union.name])
+        self.h._setup_unknown_fields(fields, self.field_info)
 
     def _setup_particle_type(self, ptype):
         mylog.debug("Don't know what to do with %s", ptype)

diff -r e5435babfca024ee65a57bf1d44e739ae27f85a1 -r d75989c329c6ce1d8ec9c141ef881ee77da4cf8f yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -948,9 +948,10 @@
         return os.path.exists("%s.hierarchy" % args[0])
 
     def _setup_particle_type(self, ptype):
-        orig = set(self.field_info.keys())
+        orig = set(self.field_info.items())
         _setup_particle_fields(self.field_info, ptype)
-        return list(set(self.field_info.keys()).difference(orig))
+        tr = [n for n, v in set(self.field_info.items()).difference(orig)]
+        return tr
 
 class EnzoStaticOutputInMemory(EnzoStaticOutput):
     _hierarchy_class = EnzoHierarchyInMemory

diff -r e5435babfca024ee65a57bf1d44e739ae27f85a1 -r d75989c329c6ce1d8ec9c141ef881ee77da4cf8f yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -567,7 +567,7 @@
                   convert_function=_get_vel_convert(ax),
                   particle_type=True)
     for fn in ["creation_time", "dynamical_time", "metallicity_fraction"] + \
-              ["particle_type"] + \
+              ["particle_type", "particle_index"] + \
               ["particle_position_%s" % ax for ax in 'xyz']:
         registry.add_field((ptype, fn), function=NullFunc, particle_type=True)
 

diff -r e5435babfca024ee65a57bf1d44e739ae27f85a1 -r d75989c329c6ce1d8ec9c141ef881ee77da4cf8f yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -76,7 +76,7 @@
                     x, y, z = (np.asarray(pds.get(pn % ax).value, dtype="=f8")
                                for ax in 'xyz')
                     yield ptype, (x, y, z)
-            f.close()
+            if f: f.close()
 
     def _read_particle_fields(self, chunks, ptf, selector):
         chunks = list(chunks)
@@ -106,7 +106,7 @@
                         if field in _convert_mass:
                             data *= g.dds.prod(dtype="f8")
                         yield (ptype, field), data[mask]
-            f.close()
+            if f: f.close()
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
         rv = {}
@@ -147,7 +147,7 @@
                     dg.read(h5py.h5s.ALL, h5py.h5s.ALL, data)
                     nd = g.select(selector, data_view, rv[field], ind) # caches
                 ind += nd
-            fid.close()
+            if fid: fid.close()
         return rv
 
 

diff -r e5435babfca024ee65a57bf1d44e739ae27f85a1 -r d75989c329c6ce1d8ec9c141ef881ee77da4cf8f yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -145,11 +145,12 @@
         if ptypes is None: ptypes = self.pf.particle_types_raw
         if None in (mname, cname, vname): 
             # If we don't know what to do, then let's not.
-            for ptype in ptypes:
+            for ptype in set(ptypes):
                 df += self.pf._setup_particle_type(ptype)
             # Now we have a bunch of new fields to add!
             # This is where the dependencies get calculated.
             self._derived_fields_add(df)
+            return
         fi = self.pf.field_info
         def _get_conv(cf):
             def _convert(data):
@@ -175,8 +176,10 @@
             df += self.pf._setup_particle_type(ptype)
         self._derived_fields_add(df)
 
-    def _setup_unknown_fields(self, list_of_fields = None):
-        known_fields = self.parameter_file._fieldinfo_known
+    def _setup_unknown_fields(self, list_of_fields = None,
+                              field_info = None,
+                              skip_particles = False):
+        field_info = field_info or self.pf._fieldinfo_known
         field_list = list_of_fields or self.field_list
         dftype = self.parameter_file.default_fluid_type
         mylog.debug("Checking %s", field_list)
@@ -185,16 +188,17 @@
             # current field info.  This means we'll instead simply override
             # it.
             ff = self.parameter_file.field_info.pop(field, None)
-            if field not in known_fields:
+            if field not in field_info:
                 # Now we check if it's a gas field or what ...
                 if isinstance(field, tuple) and field[0] in self.pf.particle_types:
                     particle_type = True
                 else:
                     particle_type = False
+                if particle_type and skip_particles: continue
                 if isinstance(field, tuple) and not particle_type and \
-                   field[1] in known_fields:
+                   field[1] in field_info:
                     mylog.debug("Adding known field %s to list of fields", field)
-                    self.pf.field_info[field] = known_fields[field[1]]
+                    self.pf.field_info[field] = field_info[field[1]]
                     continue
                 rootloginfo("Adding unknown field %s to list of fields", field)
                 cf = None
@@ -217,8 +221,8 @@
     def _setup_derived_fields(self):
         self.derived_field_list = []
         self.filtered_particle_types = []
-        fc, fac = self._derived_fields_to_check()
-        self._derived_fields_add(fc, fac)
+        fc = self._derived_fields_to_check()
+        self._derived_fields_add(fc)
 
     def _setup_filtered_type(self, filter):
         if not filter.available(self.derived_field_list):
@@ -238,23 +242,13 @@
         if available:
             self.parameter_file.particle_types += (filter.name,)
             self.filtered_particle_types.append(filter.name)
-            self._setup_particle_fields(filter.name, True)
+            self._setup_particle_types([filter.name])
         return available
 
-    def _setup_particle_fields(self, ptype, filtered = False):
-        pf = self.parameter_file
-        pmass = self.parameter_file._particle_mass_name
-        pcoord = self.parameter_file._particle_coordinates_name
-        if pmass is None or pcoord is None: return
-        df = particle_deposition_functions(ptype,
-            pcoord, pmass, self.parameter_file.field_info)
-        self._derived_fields_add(df)
-
     def _derived_fields_to_check(self):
         fi = self.parameter_file.field_info
         # First we construct our list of fields to check
         fields_to_check = []
-        fields_to_allcheck = []
         for field in fi.keys():
             finfo = fi[field]
             # Explicitly defined
@@ -274,15 +268,11 @@
                 fi[new_fi.name] = new_fi
                 new_fields.append(new_fi.name)
             fields_to_check += new_fields
-            fields_to_allcheck.append(field)
-        return fields_to_check, fields_to_allcheck
+        return fields_to_check
 
-    def _derived_fields_add(self, fields_to_check = None,
-                            fields_to_allcheck = None):
+    def _derived_fields_add(self, fields_to_check = None):
         if fields_to_check is None:
             fields_to_check = []
-        if fields_to_allcheck is None:
-            fields_to_allcheck = []
         fi = self.parameter_file.field_info
         for field in fields_to_check:
             try:
@@ -312,13 +302,6 @@
             if not fi[field].particle_type and not isinstance(field, tuple):
                 # Manually hardcode to 'gas'
                 self.parameter_file.field_dependencies["gas", field] = fd
-        for base_field in fields_to_allcheck:
-            # Now we expand our field_info with the new fields
-            all_available = all(((pt, field) in self.derived_field_list
-                                 for pt in self.parameter_file.particle_types))
-            if all_available:
-                self.derived_field_list.append( ("all", field) )
-                fi["all", base_field] = fi[base_field]
         for field in self.field_list:
             if field not in self.derived_field_list:
                 self.derived_field_list.append(field)

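For context, the "all" union built lazily in the hierarchy property is just a named collection of the raw particle types; conceptually it reduces to the following sketch, which is not yt's actual ParticleUnion class:

    class ParticleUnion(object):
        # Reduced sketch: a union is a name plus its member types.
        def __init__(self, name, sub_types):
            self.name = name
            self.sub_types = sub_types

    particle_types_raw = ("io",)        # e.g. RAMSES after this series
    pu = ParticleUnion("all", list(particle_types_raw))
    # add_particle_union then extends field_list with (pu.name, field)
    # pairs and runs _setup_particle_types before _setup_unknown_fields.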

https://bitbucket.org/yt_analysis/yt-3.0/commits/2f0c558121a3/
Changeset:   2f0c558121a3
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-10 15:54:37
Summary:     Applying fixes to the other _setup_particle_type functions.
Affected #:  3 files

diff -r d75989c329c6ce1d8ec9c141ef881ee77da4cf8f -r 2f0c558121a3b83feff4367c68301f920740a7d4 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -521,9 +521,9 @@
         self.periodicity = (True, True, True)
 
     def _setup_particle_type(self, ptype):
-        orig = set(self.field_info.keys())
+        orig = set(self.field_info.items())
         _setup_particle_fields(self.field_info, ptype)
-        return list(set(self.field_info.keys()).difference(orig))
+        return [n for n, v in set(self.field_info.items()).difference(orig)]
 
     @classmethod
     def _is_valid(self, *args, **kwargs):

diff -r d75989c329c6ce1d8ec9c141ef881ee77da4cf8f -r 2f0c558121a3b83feff4367c68301f920740a7d4 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -950,8 +950,7 @@
     def _setup_particle_type(self, ptype):
         orig = set(self.field_info.items())
         _setup_particle_fields(self.field_info, ptype)
-        tr = [n for n, v in set(self.field_info.items()).difference(orig)]
-        return tr
+        return [n for n, v in set(self.field_info.items()).difference(orig)]
 
 class EnzoStaticOutputInMemory(EnzoStaticOutput):
     _hierarchy_class = EnzoHierarchyInMemory

diff -r d75989c329c6ce1d8ec9c141ef881ee77da4cf8f -r 2f0c558121a3b83feff4367c68301f920740a7d4 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -509,9 +509,9 @@
         self.max_level = rheader['levelmax'] - self.min_level
 
     def _setup_particle_type(self, ptype):
-        orig = set(self.field_info.keys())
+        orig = set(self.field_info.items())
         _setup_particle_fields(self.field_info, ptype)
-        return list(set(self.field_info.keys()).difference(orig))
+        return [n for n, v in set(self.field_info.items()).difference(orig)]
 
     @classmethod
     def _is_valid(self, *args, **kwargs):

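The keys() to items() switch matters when a field is redefined: its key is unchanged, so only the items() difference notices the new value. In isolation, with plain strings standing in for DerivedField objects:

    registry = {("io", "particle_mass"): "old definition"}
    orig = set(registry.items())
    registry[("io", "particle_mass")] = "new definition"   # redefined
    registry[("io", "particle_index")] = "added"           # brand new
    changed = [n for n, v in set(registry.items()).difference(orig)]
    # Both fields land in `changed`; a keys()-based difference would
    # have reported only ("io", "particle_index").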

https://bitbucket.org/yt_analysis/yt-3.0/commits/155424f9e11b/
Changeset:   155424f9e11b
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-10 16:03:40
Summary:     known_fields not defined.
Affected #:  1 file

diff -r 2f0c558121a3b83feff4367c68301f920740a7d4 -r 155424f9e11b1fbe19cd3da70199b315ce010744 yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -216,7 +216,7 @@
                         convert_function=cf, take_log=False, units=r"Unknown")
             else:
                 mylog.debug("Adding known field %s to list of fields", field)
-                self.parameter_file.field_info[field] = known_fields[field]
+                self.parameter_file.field_info[field] = field_info[field]
 
     def _setup_derived_fields(self):
         self.derived_field_list = []


https://bitbucket.org/yt_analysis/yt-3.0/commits/6538ebeaf488/
Changeset:   6538ebeaf488
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-10 16:37:00
Summary:     Standardize field names for SPH.
Affected #:  3 files

diff -r 155424f9e11b1fbe19cd3da70199b315ce010744 -r 6538ebeaf488e18720d10660ac40d3ddc21ffc91 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -41,7 +41,8 @@
     GadgetHDF5FieldInfo, \
     KnownGadgetHDF5Fields, \
     TipsyFieldInfo, \
-    KnownTipsyFields
+    KnownTipsyFields, \
+    _setup_particle_fields
 from yt.data_objects.field_info_container import \
     NullFunc, \
     TranslationFunc
@@ -121,17 +122,19 @@
                 ud[unit] = ur[unit] / base
 
     def _setup_particle_type(self, ptype):
+        orig = set(self.field_info.items())
         self.field_info.add_field((ptype, "particle_index"),
             function = TranslationFunc((ptype, "ParticleIDs")),
             particle_type = True)
-        return [ (ptype, "particle_index") ]
+        _setup_particle_fields(self.field_info, ptype)
+        return [n for n, v in set(self.field_info.items()).difference(orig)]
 
 class GadgetStaticOutput(ParticleStaticOutput):
     _hierarchy_class = ParticleGeometryHandler
     _file_class = GadgetBinaryFile
     _fieldinfo_fallback = GadgetFieldInfo
     _fieldinfo_known = KnownGadgetFields
-    _particle_mass_name = "Masses"
+    _particle_mass_name = "Mass"
     _particle_coordinates_name = "Coordinates"
     _particle_velocity_name = "Velocities"
     _suffix = ""

diff -r 155424f9e11b1fbe19cd3da70199b315ce010744 -r 6538ebeaf488e18720d10660ac40d3ddc21ffc91 yt/frontends/sph/fields.py
--- a/yt/frontends/sph/fields.py
+++ b/yt/frontends/sph/fields.py
@@ -69,27 +69,26 @@
 # TIPSY
 # =====
 
-for ptype in ["Gas", "DarkMatter", "Stars"]:
-    KnownTipsyFields.add_field((ptype, "Mass"), function=NullFunc,
+def _setup_particle_fields(registry, ptype):
+    registry.add_field((ptype, "Mass"), function=NullFunc,
         particle_type = True,
         convert_function=_get_conv("mass"),
         units = r"\mathrm{g}")
-    KnownTipsyFields.add_field((ptype, "Velocities"), function=NullFunc,
+    registry.add_field((ptype, "Velocities"), function=NullFunc,
         particle_type = True,
         convert_function=_get_conv("velocity"),
         units = r"\mathrm{cm}/\mathrm{s}")
     # Note that we have to do this last so that TranslationFunc operates
     # correctly.
     particle_deposition_functions(ptype, "Coordinates", "Mass",
-                                  TipsyFieldInfo)
+                                  registry)
     particle_scalar_functions(ptype, "Coordinates", "Velocities",
-                              TipsyFieldInfo)
-particle_deposition_functions("all", "Coordinates", "Mass", TipsyFieldInfo)
+                              registry)
 
-for fname in ["Coordinates", "Velocities", "ParticleIDs", "Mass",
-              "Epsilon", "Phi"]:
-    TipsyFieldInfo.add_field(("all", fname), function=NullFunc,
-            particle_type = True)
+    for fname in ["Coordinates", "Velocities", "ParticleIDs", "Mass",
+                  "Epsilon", "Phi"]:
+        registry.add_field((ptype, fname), function=NullFunc,
+                particle_type = True)
 
 def SmoothedGas(field, data):
     pos = data["PartType0", "Coordinates"]

diff -r 155424f9e11b1fbe19cd3da70199b315ce010744 -r 6538ebeaf488e18720d10660ac40d3ddc21ffc91 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -94,7 +94,7 @@
                 del coords
                 if mask is None: continue
                 for field in field_list:
-                    if field in ("Masses", "Mass") and \
+                    if field in ("Mass", "Mass") and \
                         ptype not in self.var_mass:
                         data = np.empty(mask.sum(), dtype="float64")
                         ind = self._known_ptypes.index(ptype) 
@@ -187,7 +187,7 @@
     _fields = ( "Coordinates",
                 "Velocities",
                 "ParticleIDs",
-                "Masses",
+                "Mass",
                 ("InternalEnergy", "Gas"),
                 ("Density", "Gas"),
                 ("SmoothingLength", "Gas"),
@@ -243,7 +243,7 @@
                 del pos
                 if mask is None: continue
                 for field in field_list:
-                    if field == "Masses" and ptype not in self.var_mass:
+                    if field == "Mass" and ptype not in self.var_mass:
                         data = np.empty(mask.sum(), dtype="float64")
                         m = self.pf.parameters["Massarr"][
                             self._ptypes.index(ptype)]
@@ -307,7 +307,7 @@
                 continue
             pos += 4
             for ptype in self._ptypes:
-                if field == "Masses" and ptype not in self.var_mass:
+                if field == "Mass" and ptype not in self.var_mass:
                     continue
                 if (ptype, field) not in field_list:
                     continue


https://bitbucket.org/yt_analysis/yt-3.0/commits/785be13d2f5a/
Changeset:   785be13d2f5a
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-10 17:11:27
Summary:     Adding a new top-level package, 'fields'.

This may become something else later, if we move to 'domain_contexts' or
something.
Affected #:  34 files

diff -r 6538ebeaf488e18720d10660ac40d3ddc21ffc91 -r 785be13d2f5a3c4e54bd6866c949e901068cffee yt/analysis_modules/sunrise_export/sunrise_exporter.py
--- a/yt/analysis_modules/sunrise_export/sunrise_exporter.py
+++ b/yt/analysis_modules/sunrise_export/sunrise_exporter.py
@@ -25,7 +25,7 @@
 from yt.utilities.physical_constants import \
     kpc_per_cm, \
     sec_per_year
-from yt.data_objects.universal_fields import add_field
+from yt.fields.universal_fields import add_field
 from yt.mods import *
 
 def export_to_sunrise(pf, fn, star_particle_type, fc, fwidth, ncells_wide=None,

diff -r 6538ebeaf488e18720d10660ac40d3ddc21ffc91 -r 785be13d2f5a3c4e54bd6866c949e901068cffee yt/data_objects/particle_fields.py
--- a/yt/data_objects/particle_fields.py
+++ /dev/null
@@ -1,196 +0,0 @@
-"""
-These are common particle deposition fields.
-
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-
-from yt.funcs import *
-from yt.data_objects.field_info_container import \
-    FieldInfoContainer, \
-    FieldInfo, \
-    ValidateParameter, \
-    ValidateDataField, \
-    ValidateProperty, \
-    ValidateSpatial, \
-    ValidateGridType, \
-    NullFunc, \
-    TranslationFunc
-from yt.utilities.physical_constants import \
-    mass_hydrogen_cgs, \
-    mass_sun_cgs, \
-    mh
-
-def _field_concat(fname):
-    def _AllFields(field, data):
-        v = []
-        for ptype in data.pf.particle_types:
-            data.pf._last_freq = (ptype, None)
-            if ptype == "all" or \
-                ptype in data.pf.known_filters:
-                  continue
-            v.append(data[ptype, fname].copy())
-        rv = np.concatenate(v, axis=0)
-        return rv
-    return _AllFields
-
-def _field_concat_slice(fname, axi):
-    def _AllFields(field, data):
-        v = []
-        for ptype in data.pf.particle_types:
-            data.pf._last_freq = (ptype, None)
-            if ptype == "all" or \
-                ptype in data.pf.known_filters:
-                  continue
-            v.append(data[ptype, fname][:,axi])
-        rv = np.concatenate(v, axis=0)
-        return rv
-    return _AllFields
-
-def particle_deposition_functions(ptype, coord_name, mass_name, registry):
-    orig = set(registry.keys())
-    def particle_count(field, data):
-        pos = data[ptype, coord_name]
-        d = data.deposit(pos, method = "count")
-        return d
-
-    registry.add_field(("deposit", "%s_count" % ptype),
-             function = particle_count,
-             validators = [ValidateSpatial()],
-             display_name = "\\mathrm{%s Count}" % ptype,
-             projection_conversion = '1')
-
-    def particle_mass(field, data):
-        pos = data[ptype, coord_name]
-        d = data.deposit(pos, [data[ptype, mass_name]], method = "sum")
-        return d
-
-    registry.add_field(("deposit", "%s_mass" % ptype),
-             function = particle_mass,
-             validators = [ValidateSpatial()],
-             display_name = "\\mathrm{%s Mass}" % ptype,
-             units = r"\mathrm{g}",
-             projected_units = r"\mathrm{g}\/\mathrm{cm}",
-             projection_conversion = 'cm')
-
-    def particle_density(field, data):
-        pos = data[ptype, coord_name]
-        d = data.deposit(pos, [data[ptype, mass_name]], method = "sum")
-        d /= data["gas","CellVolume"]
-        return d
-
-    registry.add_field(("deposit", "%s_density" % ptype),
-             function = particle_density,
-             validators = [ValidateSpatial()],
-             display_name = "\\mathrm{%s Density}" % ptype,
-             units = r"\mathrm{g}/\mathrm{cm}^{3}",
-             projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
-             projection_conversion = 'cm')
-
-    def particle_cic(field, data):
-        pos = data[ptype, coord_name]
-        d = data.deposit(pos, [data[ptype, mass_name]], method = "cic")
-        d /= data["gas","CellVolume"]
-        return d
-
-    registry.add_field(("deposit", "%s_cic" % ptype),
-             function = particle_cic,
-             validators = [ValidateSpatial()],
-             display_name = "\\mathrm{%s CIC Density}" % ptype,
-             units = r"\mathrm{g}/\mathrm{cm}^{3}",
-             projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
-             projection_conversion = 'cm')
-
-    # Now some translation functions.
-
-    def particle_ones(field, data):
-        return np.ones(data[ptype, mass_name].shape, dtype="float64")
-
-    registry.add_field((ptype, "particle_ones"),
-                       function = particle_ones,
-                       particle_type = True,
-                       units = "")
-
-    registry.add_field((ptype, "ParticleMass"),
-            function = TranslationFunc((ptype, mass_name)),
-            particle_type = True,
-            units = r"\mathrm{g}")
-
-    def _ParticleMassMsun(field, data):
-        return data[ptype, mass_name].copy()
-    def _conv_Msun(data):
-        return 1.0/mass_sun_cgs
-
-    registry.add_field((ptype, "ParticleMassMsun"),
-            function = _ParticleMassMsun,
-            convert_function = _conv_Msun,
-            particle_type = True,
-            units = r"\mathrm{M}_\odot")
-
-    def particle_mesh_ids(field, data):
-        pos = data[ptype, coord_name]
-        ids = np.zeros(pos.shape[0], dtype="float64") - 1
-        # This is float64 in name only.  It will be properly cast inside the
-        # deposit operation.
-        #_ids = ids.view("float64")
-        data.deposit(pos, [ids], method = "mesh_id")
-        return ids
-    registry.add_field((ptype, "mesh_id"),
-            function = particle_mesh_ids,
-            validators = [ValidateSpatial()],
-            particle_type = True)
-
-    return list(set(registry.keys()).difference(orig))
-
-
-def particle_scalar_functions(ptype, coord_name, vel_name, registry):
-
-    # Now we have to set up the various velocity and coordinate things.  In the
-    # future, we'll actually invert this and use the 3-component items
-    # elsewhere, and stop using these.
-    
-    # Note that we pass in _ptype here so that it's defined inside the closure.
-    orig = set(registry.keys())
-
-    def _get_coord_funcs(axi, _ptype):
-        def _particle_velocity(field, data):
-            return data[_ptype, vel_name][:,axi]
-        def _particle_position(field, data):
-            return data[_ptype, coord_name][:,axi]
-        return _particle_velocity, _particle_position
-    for axi, ax in enumerate("xyz"):
-        v, p = _get_coord_funcs(axi, ptype)
-        registry.add_field((ptype, "particle_velocity_%s" % ax),
-            particle_type = True, function = v)
-        registry.add_field((ptype, "particle_position_%s" % ax),
-            particle_type = True, function = p)
-
-    return list(set(registry.keys()).difference(orig))
-
-def particle_vector_functions(ptype, coord_names, vel_names, registry):
-
-    # This will column_stack a set of scalars to create vector fields.
-    orig = set(registry.keys())
-
-    def _get_vec_func(_ptype, names):
-        def particle_vectors(field, data):
-            return np.column_stack([data[_ptype, name] for name in names])
-        return particle_vectors
-    registry.add_field((ptype, "Coordinates"),
-                       function=_get_vec_func(ptype, coord_names),
-                       particle_type=True)
-    registry.add_field((ptype, "Velocities"),
-                       function=_get_vec_func(ptype, vel_names),
-                       particle_type=True)
-    return list(set(registry.keys()).difference(orig))

diff -r 6538ebeaf488e18720d10660ac40d3ddc21ffc91 -r 785be13d2f5a3c4e54bd6866c949e901068cffee yt/data_objects/tests/test_fields.py
--- a/yt/data_objects/tests/test_fields.py
+++ /dev/null
@@ -1,111 +0,0 @@
-from yt.testing import *
-import numpy as np
-from yt.data_objects.field_info_container import \
-    FieldInfo
-import yt.data_objects.universal_fields
-from yt.utilities.definitions import \
-    mpc_conversion, sec_conversion
-
-def setup():
-    from yt.config import ytcfg
-    ytcfg["yt","__withintesting"] = "True"
-    np.seterr(all = 'ignore')
-
-_sample_parameters = dict(
-    axis = 0,
-    center = np.array((0.0, 0.0, 0.0)),
-    bulk_velocity = np.array((0.0, 0.0, 0.0)),
-    normal = np.array((0.0, 0.0, 1.0)),
-    cp_x_vec = np.array((1.0, 0.0, 0.0)),
-    cp_y_vec = np.array((0.0, 1.0, 0.0)),
-    cp_z_vec = np.array((0.0, 0.0, 1.0)),
-)
-
-_base_fields = ["Density", "x-velocity", "y-velocity", "z-velocity"]
-
-def realistic_pf(fields, nprocs):
-    np.random.seed(int(0x4d3d3d3))
-    pf = fake_random_pf(16, fields = fields, nprocs = nprocs)
-    pf.parameters["HydroMethod"] = "streaming"
-    pf.parameters["Gamma"] = 5.0/3.0
-    pf.parameters["EOSType"] = 1.0
-    pf.parameters["EOSSoundSpeed"] = 1.0
-    pf.conversion_factors["Time"] = 1.0
-    pf.conversion_factors.update( dict((f, 1.0) for f in fields) )
-    pf.current_redshift = 0.0001
-    pf.hubble_constant = 0.7
-    pf.omega_matter = 0.27
-    for unit in mpc_conversion:
-        pf.units[unit+'h'] = pf.units[unit]
-        pf.units[unit+'cm'] = pf.units[unit]
-        pf.units[unit+'hcm'] = pf.units[unit]
-    return pf
-
-class TestFieldAccess(object):
-    description = None
-
-    def __init__(self, field_name, nproc):
-        # Note this should be a field name
-        self.field_name = field_name
-        self.description = "Accessing_%s_%s" % (field_name, nproc)
-        self.nproc = nproc
-
-    def __call__(self):
-        field = FieldInfo[self.field_name]
-        deps = field.get_dependencies()
-        fields = list(set(deps.requested + _base_fields))
-        skip_grids = False
-        needs_spatial = False
-        for v in field.validators:
-            f = getattr(v, "fields", None)
-            if f: fields += f
-            if getattr(v, "ghost_zones", 0) > 0:
-                skip_grids = True
-            if hasattr(v, "ghost_zones"):
-                needs_spatial = True
-        pf = realistic_pf(fields, self.nproc)
-        # This gives unequal sized grids as well as subgrids
-        dd1 = pf.h.all_data()
-        dd2 = pf.h.all_data()
-        dd1.field_parameters.update(_sample_parameters)
-        dd2.field_parameters.update(_sample_parameters)
-        v1 = dd1[self.field_name]
-        conv = field._convert_function(dd1) or 1.0
-        if not field.particle_type:
-            assert_equal(v1, dd1["gas", self.field_name])
-        if not needs_spatial:
-            assert_array_almost_equal_nulp(v1, conv*field._function(field, dd2), 4)
-        if not skip_grids:
-            for g in pf.h.grids:
-                g.field_parameters.update(_sample_parameters)
-                conv = field._convert_function(g) or 1.0
-                v1 = g[self.field_name]
-                g.clear_data()
-                g.field_parameters.update(_sample_parameters)
-                assert_array_almost_equal_nulp(v1, conv*field._function(field, g), 4)
-
-def test_all_fields():
-    for field in FieldInfo:
-        if isinstance(field, types.TupleType):
-            fname = field[0]
-        else:
-            fname = field
-        if fname.startswith("CuttingPlane"): continue
-        if fname.startswith("particle"): continue
-        if fname.startswith("CIC"): continue
-        if fname.startswith("BetaPar"): continue
-        if fname.startswith("TBetaPar"): continue
-        if fname.startswith("BetaPerp"): continue
-        if fname.startswith("WeakLensingConvergence"): continue
-        if fname.startswith("DensityPerturbation"): continue
-        if fname.startswith("Matter_Density"): continue
-        if fname.startswith("Overdensity"): continue
-        if FieldInfo[field].particle_type: continue
-        for nproc in [1, 4, 8]:
-            test_all_fields.__name__ = "%s_%s" % (field, nproc)
-            yield TestFieldAccess(field, nproc)
-
-if __name__ == "__main__":
-    setup()
-    for t in test_all_fields():
-        t()
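
For reference, test_all_fields above follows nose's generator-test convention: the test function yields callables (here TestFieldAccess instances, whose description attribute names each case), and nose runs every yielded object as its own test. A minimal sketch of the same pattern, with illustrative names:

    # Nose runs each yielded (callable, args) pair as a separate case.
    def check_positive(x):
        assert x > 0

    def test_many_values():
        for x in [1, 2, 3]:
            yield check_positive, x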

diff -r 6538ebeaf488e18720d10660ac40d3ddc21ffc91 -r 785be13d2f5a3c4e54bd6866c949e901068cffee yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ /dev/null
@@ -1,1559 +0,0 @@
-"""
-The basic field info container resides here.  These classes, code specific and
-universal, are the means by which we access fields across YT, both derived and
-native.
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import types
-import numpy as np
-import inspect
-import copy
-
-from yt.funcs import *
-
-from yt.utilities.lib import obtain_rvec, obtain_rv_vec
-from yt.utilities.cosmology import Cosmology
-from field_info_container import \
-    add_field, \
-    ValidateDataField, \
-    ValidateGridType, \
-    ValidateParameter, \
-    ValidateSpatial, \
-    NeedsGridType, \
-    NeedsOriginalGrid, \
-    NeedsDataField, \
-    NeedsProperty, \
-    NeedsParameter, \
-    NullFunc
-
-from yt.utilities.physical_constants import \
-     mass_sun_cgs, \
-     mh, \
-     me, \
-     sigma_thompson, \
-     clight, \
-     kboltz, \
-     G, \
-     rho_crit_now, \
-     speed_of_light_cgs, \
-     km_per_cm, keV_per_K
-
-from yt.utilities.math_utils import \
-    get_sph_r_component, \
-    get_sph_theta_component, \
-    get_sph_phi_component, \
-    get_cyl_r_component, \
-    get_cyl_z_component, \
-    get_cyl_theta_component, \
-    get_cyl_r, get_cyl_theta, \
-    get_cyl_z, get_sph_r, \
-    get_sph_theta, get_sph_phi, \
-    periodic_dist, euclidean_dist
-     
-def _GridLevel(field, data):
-    return np.ones(data.ActiveDimensions)*(data.Level)
-add_field("GridLevel", function=_GridLevel,
-          validators=[ValidateGridType(),
-                      ValidateSpatial(0)])
-
-def _GridIndices(field, data):
-    return np.ones(data["Ones"].shape)*(data.id-data._id_offset)
-add_field("GridIndices", function=_GridIndices,
-          validators=[ValidateGridType(),
-                      ValidateSpatial(0)], take_log=False)
-
-def _OnesOverDx(field, data):
-    return np.ones(data["Ones"].shape,
-                   dtype=data["Density"].dtype)/data['dx']
-add_field("OnesOverDx", function=_OnesOverDx,
-          display_field=False)
-
-def _Zeros(field, data):
-    return np.zeros(data["Ones"].shape, dtype='float64')
-add_field("Zeros", function=_Zeros,
-          projection_conversion="unitary",
-          display_field = False)
-
-def _Ones(field, data):
-    tr = np.ones(data.ires.shape, dtype="float64")
-    if data._spatial:
-        return data._reshape_vals(tr)
-    return tr
-add_field("Ones", function=_Ones,
-          projection_conversion="unitary",
-          display_field = False)
-add_field("CellsPerBin", function=_Ones,
-          display_field = False)
-
-def _SoundSpeed(field, data):
-    if data.pf["EOSType"] == 1:
-        return np.ones(data["Density"].shape, dtype='float64') * \
-                data.pf["EOSSoundSpeed"]
-    return ( data.pf["Gamma"]*data["Pressure"] / \
-             data["Density"] )**(1.0/2.0)
-add_field("SoundSpeed", function=_SoundSpeed,
-          units=r"\rm{cm}/\rm{s}")
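
The field above is the ideal-gas sound speed, c_s = sqrt(Gamma * P / rho), with a constant-c_s shortcut when EOSType == 1 (isothermal). A self-contained numpy sketch of the non-isothermal branch, with illustrative CGS values:

    import numpy as np

    # c_s = sqrt(gamma * P / rho); matches the second branch of _SoundSpeed.
    gamma = 5.0 / 3.0
    pressure = np.array([1.0e-12, 2.0e-12])   # dyne/cm^2
    density = np.array([1.0e-24, 1.0e-24])    # g/cm^3
    c_s = np.sqrt(gamma * pressure / density)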
-
-def _RadialMachNumber(field, data):
-    """M{|v|/t_sound}"""
-    return np.abs(data["RadialVelocity"]) / data["SoundSpeed"]
-add_field("RadialMachNumber", function=_RadialMachNumber)
-
-def _MachNumber(field, data):
-    """M{|v|/t_sound}"""
-    return data["VelocityMagnitude"] / data["SoundSpeed"]
-add_field("MachNumber", function=_MachNumber)
-
-def _CourantTimeStep(field, data):
-    t1 = data['dx'] / (
-        data["SoundSpeed"] + \
-        abs(data["x-velocity"]))
-    t2 = data['dy'] / (
-        data["SoundSpeed"] + \
-        abs(data["y-velocity"]))
-    t3 = data['dz'] / (
-        data["SoundSpeed"] + \
-        abs(data["z-velocity"]))
-    return np.minimum(np.minimum(t1,t2),t3)
-def _convertCourantTimeStep(data):
-    # SoundSpeed and z-velocity are in cm/s, dx is in code
-    return data.convert("cm")
-add_field("CourantTimeStep", function=_CourantTimeStep,
-          convert_function=_convertCourantTimeStep,
-          units=r"$\rm{s}$")
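
_CourantTimeStep is the usual CFL bound: per axis the signal speed is c_s + |v|, and the timestep is the minimum of dx / (c_s + |v|) over all cells and axes. The same reduction as a standalone sketch (field values illustrative):

    import numpy as np

    # dt = min over axes and cells of dx / (c_s + |v|).
    shape = (8, 8, 8)
    rng = np.random.RandomState(0)
    cs = np.full(shape, 1.0e5)                       # cm/s
    vel = [rng.uniform(-1e5, 1e5, shape) for _ in range(3)]
    dx = (3.0e18, 3.0e18, 3.0e18)                    # cm
    dt = min((d / (cs + np.abs(v))).min()
             for d, v in zip(dx, vel))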
-
-def _ParticleVelocityMagnitude(field, data):
-    """M{|v|}"""
-    bulk_velocity = data.get_field_parameter("bulk_velocity")
-    if bulk_velocity is None:
-        bulk_velocity = np.zeros(3)
-    return ( (data["particle_velocity_x"]-bulk_velocity[0])**2.0 + \
-             (data["particle_velocity_y"]-bulk_velocity[1])**2.0 + \
-             (data["particle_velocity_z"]-bulk_velocity[2])**2.0 )**(1.0/2.0)
-add_field("ParticleVelocityMagnitude", function=_ParticleVelocityMagnitude,
-          particle_type=True, 
-          take_log=False, units=r"\rm{cm}/\rm{s}")
-
-def _VelocityMagnitude(field, data):
-    """M{|v|}"""
-    velocities = obtain_rv_vec(data)
-    return np.sqrt(np.sum(velocities**2,axis=0))
-add_field("VelocityMagnitude", function=_VelocityMagnitude,
-          take_log=False, units=r"\rm{cm}/\rm{s}")
-
-def _TangentialOverVelocityMagnitude(field, data):
-    return np.abs(data["TangentialVelocity"])/np.abs(data["VelocityMagnitude"])
-add_field("TangentialOverVelocityMagnitude",
-          function=_TangentialOverVelocityMagnitude,
-          take_log=False)
-
-def _Pressure(field, data):
-    """M{(Gamma-1.0)*rho*E}"""
-    return (data.pf["Gamma"] - 1.0) * \
-           data["Density"] * data["ThermalEnergy"]
-add_field("Pressure", function=_Pressure, units=r"\rm{dyne}/\rm{cm}^{2}")
-
-def _TempkeV(field, data):
-    return data["Temperature"] * keV_per_K
-add_field("TempkeV", function=_TempkeV, units=r"\rm{keV}",
-          display_name="Temperature")
-
-def _Entropy(field, data):
-    if data.has_field_parameter("mu"):
-        mw = mh*data.get_field_parameter("mu")
-    else :
-        mw = mh
-    try:
-        gammam1 = data.pf["Gamma"] - 1.0
-    except:
-        gammam1 = 5./3. - 1.0
-    return kboltz * data["Temperature"] / \
-           ((data["Density"]/mw)**gammam1)
-add_field("Entropy", units=r"\rm{ergs}\ \rm{cm}^{3\gamma-3}",
-          function=_Entropy)
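
For reference, the field above is the standard astrophysical entropy proxy, reconstructed from the code (mu falls back to 1 when no "mu" field parameter is set, and gamma to 5/3 when the parameter file lacks "Gamma"):

    K = \frac{k_B T}{\left(\rho / \mu m_H\right)^{\gamma - 1}}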
-
-### spherical coordinates: r (radius)
-def _sph_r(field, data):
-    center = data.get_field_parameter("center")
-      
-    coords = obtain_rvec(data)
-
-    return get_sph_r(coords)
-
-def _Convert_sph_r_CGS(data):
-   return data.convert("cm")
-
-add_field("sph_r", function=_sph_r,
-         validators=[ValidateParameter("center")],
-         convert_function = _Convert_sph_r_CGS, units=r"\rm{cm}")
-
-
-### spherical coordinates: theta (angle with respect to normal)
-def _sph_theta(field, data):
-    center = data.get_field_parameter("center")
-    normal = data.get_field_parameter("normal")
-    
-    coords = obtain_rvec(data)
-
-    return get_sph_theta(coords, normal)
-
-add_field("sph_theta", function=_sph_theta,
-         validators=[ValidateParameter("center"),ValidateParameter("normal")])
-
-
-### spherical coordinates: phi (angle in the plane perpendicular to the normal)
-def _sph_phi(field, data):
-    center = data.get_field_parameter("center")
-    normal = data.get_field_parameter("normal")
-    
-    coords = obtain_rvec(data)
-
-    return get_sph_phi(coords, normal)
-
-add_field("sph_phi", function=_sph_phi,
-         validators=[ValidateParameter("center"),ValidateParameter("normal")])
-
-### cylindrical coordinates: R (radius in the cylinder's plane)
-def _cyl_R(field, data):
-    center = data.get_field_parameter("center")
-    normal = data.get_field_parameter("normal")
-      
-    coords = obtain_rvec(data)
-
-    return get_cyl_r(coords, normal)
-
-def _Convert_cyl_R_CGS(data):
-   return data.convert("cm")
-
-add_field("cyl_R", function=_cyl_R,
-         validators=[ValidateParameter("center"),ValidateParameter("normal")],
-         convert_function = _Convert_cyl_R_CGS, units=r"\rm{cm}")
-add_field("cyl_RCode", function=_cyl_R,
-          validators=[ValidateParameter("center"),ValidateParameter("normal")],
-          units=r"Radius (code)")
-
-
-### cylindrical coordinates: z (height above the cylinder's plane)
-def _cyl_z(field, data):
-    center = data.get_field_parameter("center")
-    normal = data.get_field_parameter("normal")
-    
-    coords = obtain_rvec(data)
-
-    return get_cyl_z(coords, normal)
-
-def _Convert_cyl_z_CGS(data):
-   return data.convert("cm")
-
-add_field("cyl_z", function=_cyl_z,
-         validators=[ValidateParameter("center"),ValidateParameter("normal")],
-         convert_function = _Convert_cyl_z_CGS, units=r"\rm{cm}")
-
-
-### cylindrical coordinates: theta (angle in the cylinder's plane)
-def _cyl_theta(field, data):
-    center = data.get_field_parameter("center")
-    normal = data.get_field_parameter("normal")
-    
-    coords = obtain_rvec(data)
-
-    return get_cyl_theta(coords, normal)
-
-add_field("cyl_theta", function=_cyl_theta,
-         validators=[ValidateParameter("center"),ValidateParameter("normal")])
-
-### The old field DiskAngle is the same as the spherical coordinates'
-### 'theta' angle. I'm keeping DiskAngle for backwards compatibility.
-def _DiskAngle(field, data):
-    return data['sph_theta']
-
-add_field("DiskAngle", function=_DiskAngle,
-          take_log=False,
-          validators=[ValidateParameter("center"),
-                      ValidateParameter("normal")],
-          display_field=False)
-
-
-### The old field Height is the same as the cylindrical coordinates' z
-### field. I'm keeping Height for backwards compatibility.
-def _Height(field, data):
-    return data['cyl_z']
-
-def _convertHeight(data):
-    return data.convert("cm")
-def _convertHeightAU(data):
-    return data.convert("au")
-add_field("Height", function=_Height,
-          convert_function=_convertHeight,
-          validators=[ValidateParameter("center"),
-                      ValidateParameter("normal")],
-          units=r"cm", display_field=False)
-add_field("HeightAU", function=_Height,
-          convert_function=_convertHeightAU,
-          validators=[ValidateParameter("center"),
-                      ValidateParameter("normal")],
-          units=r"AU", display_field=False)
-
-def _cyl_RadialVelocity(field, data):
-    normal = data.get_field_parameter("normal")
-    velocities = obtain_rv_vec(data)
-
-    theta = data['cyl_theta']
-
-    return get_cyl_r_component(velocities, theta, normal)
-
-def _cyl_RadialVelocityABS(field, data):
-    return np.abs(_cyl_RadialVelocity(field, data))
-def _Convert_cyl_RadialVelocityKMS(data):
-    return km_per_cm
-add_field("cyl_RadialVelocity", function=_cyl_RadialVelocity,
-          units=r"\rm{cm}/\rm{s}",
-          validators=[ValidateParameter("normal")])
-add_field("cyl_RadialVelocityABS", function=_cyl_RadialVelocityABS,
-          units=r"\rm{cm}/\rm{s}",
-          validators=[ValidateParameter("normal")])
-add_field("cyl_RadialVelocityKMS", function=_cyl_RadialVelocity,
-          convert_function=_Convert_cyl_RadialVelocityKMS, units=r"\rm{km}/\rm{s}",
-          validators=[ValidateParameter("normal")])
-add_field("cyl_RadialVelocityKMSABS", function=_cyl_RadialVelocityABS,
-          convert_function=_Convert_cyl_RadialVelocityKMS, units=r"\rm{km}/\rm{s}",
-          validators=[ValidateParameter("normal")])
-
-def _cyl_TangentialVelocity(field, data):
-    normal = data.get_field_parameter("normal")
-    velocities = obtain_rv_vec(data)
-    theta = data['cyl_theta']
-
-    return get_cyl_theta_component(velocities, theta, normal)
-
-def _cyl_TangentialVelocityABS(field, data):
-    return np.abs(_cyl_TangentialVelocity(field, data))
-def _Convert_cyl_TangentialVelocityKMS(data):
-    return km_per_cm
-add_field("cyl_TangentialVelocity", function=_cyl_TangentialVelocity,
-          units=r"\rm{cm}/\rm{s}",
-          validators=[ValidateParameter("normal")])
-add_field("cyl_TangentialVelocityABS", function=_cyl_TangentialVelocityABS,
-          units=r"\rm{cm}/\rm{s}",
-          validators=[ValidateParameter("normal")])
-add_field("cyl_TangentialVelocityKMS", function=_cyl_TangentialVelocity,
-          convert_function=_Convert_cyl_TangentialVelocityKMS, units=r"\rm{km}/\rm{s}",
-          validators=[ValidateParameter("normal")])
-add_field("cyl_TangentialVelocityKMSABS", function=_cyl_TangentialVelocityABS,
-          convert_function=_Convert_cyl_TangentialVelocityKMS, units=r"\rm{km}/\rm{s}",
-          validators=[ValidateParameter("normal")])
-
-def _DynamicalTime(field, data):
-    """
-    The formulation for the dynamical time is:
-    M{sqrt(3pi/(16*G*rho))} or M{sqrt(3pi/(16G))*rho^-(1/2)}
-    Note that the result is already in CGS units (seconds).
-    """
-    return (3.0*np.pi/(16*G*data["Density"]))**(1./2.)
-add_field("DynamicalTime", function=_DynamicalTime,
-           units=r"\rm{s}")
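
As a numeric sanity check of the formula above (a standalone sketch; the CGS value of G here is the standard one rather than yt's constant):

    import numpy as np

    # t_dyn = sqrt(3*pi / (16*G*rho)) in CGS.
    G = 6.674e-8          # cm^3 g^-1 s^-2
    rho = 1.0e-24         # g/cm^3, ~1 hydrogen atom per cm^3
    t_dyn = np.sqrt(3.0 * np.pi / (16.0 * G * rho))
    # ~3.0e15 s, i.e. roughly 1e8 yr for diffuse gas.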
-
-def _CellMass(field, data):
-    return data["Density"] * data["CellVolume"]
-def _convertCellMassMsun(data):
-    return 1.0 / mass_sun_cgs # g^-1
-add_field("CellMass", function=_CellMass, units=r"\rm{g}")
-add_field("CellMassMsun", units=r"M_{\odot}",
-          function=_CellMass,
-          convert_function=_convertCellMassMsun)
-
-def _CellMassCode(field, data):
-    return data["Density"] * data["CellVolumeCode"]
-def _convertCellMassCode(data):
-    return 1.0/data.convert("Density")
-add_field("CellMassCode", 
-          function=_CellMassCode,
-          convert_function=_convertCellMassCode)
-
-def _TotalMass(field,data):
-    return (data["Density"]+data[("deposit", "particle_density")]) * data["CellVolume"]
-add_field("TotalMass", function=_TotalMass, units=r"\rm{g}")
-add_field("TotalMassMsun", units=r"M_{\odot}",
-          function=_TotalMass,
-          convert_function=_convertCellMassMsun)
-
-def _StarMass(field,data):
-    return data["star_density"] * data["CellVolume"]
-add_field("StarMassMsun", units=r"M_{\odot}",
-          function=_StarMass,
-          convert_function=_convertCellMassMsun)
-
-def _Matter_Density(field,data):
-    return (data['Density'] + data['particle_density'])
-add_field("Matter_Density",function=_Matter_Density,units=r"\rm{g}/\rm{cm^3}")
-
-def _ComovingDensity(field, data):
-    ef = (1.0 + data.pf.current_redshift)**3.0
-    return data["Density"]/ef
-add_field("ComovingDensity", function=_ComovingDensity, units=r"\rm{g}/\rm{cm}^3")
-
-# This is rho_total / rho_cr(z).
-def _Convert_Overdensity(data):
-    return 1.0 / (rho_crit_now * data.pf.hubble_constant**2 * 
-                (1+data.pf.current_redshift)**3)
-add_field("Overdensity",function=_Matter_Density,
-          convert_function=_Convert_Overdensity, units=r"")
-
-# This is (rho_total - <rho_total>) / <rho_total>.
-def _DensityPerturbation(field, data):
-    rho_bar = rho_crit_now * data.pf.omega_matter * \
-        data.pf.hubble_constant**2 * \
-        (1.0 + data.pf.current_redshift)**3
-    return ((data['Matter_Density'] - rho_bar) / rho_bar)
-add_field("DensityPerturbation",function=_DensityPerturbation,units=r"")
-
-# This is rho_b / <rho_b>.
-def _Baryon_Overdensity(field, data):
-    if data.pf.has_key('omega_baryon_now'):
-        omega_baryon_now = data.pf['omega_baryon_now']
-    else:
-        omega_baryon_now = 0.0441
-    return data['Density'] / (omega_baryon_now * rho_crit_now * 
-                              (data.pf.hubble_constant**2) * 
-                              ((1+data.pf.current_redshift)**3))
-add_field("Baryon_Overdensity", function=_Baryon_Overdensity, 
-          units=r"")
-
-# Weak lensing convergence.
-# Eqn 4 of Metzler, White, & Loken (2001, ApJ, 547, 560).
-def _convertConvergence(data):
-    if not data.pf.parameters.has_key('cosmology_calculator'):
-        data.pf.parameters['cosmology_calculator'] = Cosmology(
-            HubbleConstantNow=(100.*data.pf.hubble_constant),
-            OmegaMatterNow=data.pf.omega_matter, OmegaLambdaNow=data.pf.omega_lambda)
-    # observer to lens
-    DL = data.pf.parameters['cosmology_calculator'].AngularDiameterDistance(
-        data.pf.parameters['observer_redshift'], data.pf.current_redshift)
-    # observer to source
-    DS = data.pf.parameters['cosmology_calculator'].AngularDiameterDistance(
-        data.pf.parameters['observer_redshift'], data.pf.parameters['lensing_source_redshift'])
-    # lens to source
-    DLS = data.pf.parameters['cosmology_calculator'].AngularDiameterDistance(
-        data.pf.current_redshift, data.pf.parameters['lensing_source_redshift'])
-    # TODO: convert 1.5e14 to constants
-    return (((DL * DLS) / DS) * (1.5e14 * data.pf.omega_matter * 
-                                (data.pf.hubble_constant / speed_of_light_cgs)**2 *
-                                (1 + data.pf.current_redshift)))
-add_field("WeakLensingConvergence", function=_DensityPerturbation, 
-          convert_function=_convertConvergence, 
-          projection_conversion='mpccm')
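
For reference, the conversion above encodes the thin-lens convergence kernel of Metzler, White, & Loken (2001), Eqn 4. Reconstructed from the code (the 1.5e14 constant absorbs the factor 3/2 and the unit conversions flagged in the TODO), the projected field is

    \kappa = \frac{3}{2}\,\Omega_m \left(\frac{H_0}{c}\right)^2
             (1 + z_L)\,\frac{D_L\,D_{LS}}{D_S}
             \int \frac{\rho - \bar{\rho}}{\bar{\rho}}\,dl

where D_L, D_S, and D_LS are the angular diameter distances from observer to lens, observer to source, and lens to source, and the line-of-sight integral is supplied by the projection.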
-
-def _CellVolume(field, data):
-    if data['dx'].size == 1:
-        try:
-            return data['dx'] * data['dy'] * data['dz'] * \
-                np.ones(data.ActiveDimensions, dtype='float64')
-        except AttributeError:
-            return data['dx'] * data['dy'] * data['dz']
-    return data["dx"] * data["dy"] * data["dz"]
-def _ConvertCellVolumeMpc(data):
-    return data.convert("mpc")**3.0
-def _ConvertCellVolumeCGS(data):
-    return data.convert("cm")**3.0
-add_field("CellVolumeCode", units=r"\rm{BoxVolume}^3",
-          function=_CellVolume)
-add_field("CellVolumeMpc", units=r"\rm{Mpc}^3",
-          function=_CellVolume,
-          convert_function=_ConvertCellVolumeMpc)
-add_field("CellVolume", units=r"\rm{cm}^3",
-          function=_CellVolume,
-          convert_function=_ConvertCellVolumeCGS)
-
-def _ChandraEmissivity(field, data):
-    logT0 = np.log10(data["Temperature"]) - 7
-    return ((data["NumberDensity"].astype('float64')**2.0) \
-            *(10**(-0.0103*logT0**8 \
-                   +0.0417*logT0**7 \
-                   -0.0636*logT0**6 \
-                   +0.1149*logT0**5 \
-                   -0.3151*logT0**4 \
-                   +0.6655*logT0**3 \
-                   -1.1256*logT0**2 \
-                   +1.0026*logT0**1 \
-                   -0.6984*logT0) \
-              +data["Metallicity"]*10**(0.0305*logT0**11 \
-                                        -0.0045*logT0**10 \
-                                        -0.3620*logT0**9 \
-                                        +0.0513*logT0**8 \
-                                        +1.6669*logT0**7 \
-                                        -0.3854*logT0**6 \
-                                        -3.3604*logT0**5 \
-                                        +0.4728*logT0**4 \
-                                        +4.5774*logT0**3 \
-                                        -2.3661*logT0**2 \
-                                        -1.6667*logT0**1 \
-                                        -0.2193*logT0)))
-def _convertChandraEmissivity(data):
-    return 1.0 #1.0e-23*0.76**2
-add_field("ChandraEmissivity", function=_ChandraEmissivity,
-          convert_function=_convertChandraEmissivity,
-          projection_conversion="1")
-
-def _XRayEmissivity(field, data):
-    return ((data["Density"].astype('float64')**2.0) \
-            *data["Temperature"]**0.5)
-def _convertXRayEmissivity(data):
-    return 2.168e60 #TODO: convert me to constants
-add_field("XRayEmissivity", function=_XRayEmissivity,
-          convert_function=_convertXRayEmissivity,
-          projection_conversion="1")
-
-def _SZKinetic(field, data):
-    vel_axis = data.get_field_parameter('axis')
-    if vel_axis > 2:
-        raise NeedsParameter(['axis'])
-    vel = data["%s-velocity" % ({0:'x',1:'y',2:'z'}[vel_axis])]
-    return (vel*data["Density"])
-def _convertSZKinetic(data):
-    return 0.88*((sigma_thompson/mh)/clight)
-add_field("SZKinetic", function=_SZKinetic,
-          convert_function=_convertSZKinetic,
-          validators=[ValidateParameter('axis')])
-
-def _SZY(field, data):
-    return (data["Density"]*data["Temperature"])
-def _convertSZY(data):
-    conv = (0.88/mh) * (kboltz)/(me * clight*clight) * sigma_thompson
-    return conv
-add_field("SZY", function=_SZY, convert_function=_convertSZY)
-
-def _AveragedDensity(field, data):
-    nx, ny, nz = data["Density"].shape
-    new_field = np.zeros((nx-2,ny-2,nz-2), dtype='float64')
-    weight_field = np.zeros((nx-2,ny-2,nz-2), dtype='float64')
-    i_i, j_i, k_i = np.mgrid[0:3,0:3,0:3]
-    for i,j,k in zip(i_i.ravel(),j_i.ravel(),k_i.ravel()):
-        sl = [slice(i,nx-(2-i)),slice(j,ny-(2-j)),slice(k,nz-(2-k))]
-        new_field += data["Density"][sl] * data["CellMass"][sl]
-        weight_field += data["CellMass"][sl]
-    # Now some fancy footwork
-    new_field2 = np.zeros((nx,ny,nz))
-    new_field2[1:-1,1:-1,1:-1] = new_field/weight_field
-    return new_field2
-add_field("AveragedDensity",
-          function=_AveragedDensity,
-          validators=[ValidateSpatial(1, ["Density"])])
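
The 27-point loop above is a mass-weighted 3x3x3 boxcar over the interior cells. An equivalent formulation with scipy.ndimage (a sketch, not the code yt used; edge handling differs, since the original leaves a zero rind at the boundary):

    import numpy as np
    from scipy import ndimage

    rng = np.random.RandomState(0)
    rho = rng.uniform(1.0, 2.0, (8, 8, 8))
    mass = rho * 1.0                  # uniform cell volume for the sketch
    kernel = np.ones((3, 3, 3))
    avg = (ndimage.convolve(rho * mass, kernel, mode="constant")
           / ndimage.convolve(mass, kernel, mode="constant"))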
-
-def _DivV(field, data):
-    # We need to set up stencils
-    if data.pf["HydroMethod"] == 2:
-        sl_left = slice(None,-2,None)
-        sl_right = slice(1,-1,None)
-        div_fac = 1.0
-    else:
-        sl_left = slice(None,-2,None)
-        sl_right = slice(2,None,None)
-        div_fac = 2.0
-    ds = div_fac * data['dx'].flat[0]
-    f  = data["x-velocity"][sl_right,1:-1,1:-1]/ds
-    f -= data["x-velocity"][sl_left ,1:-1,1:-1]/ds
-    if data.pf.dimensionality > 1:
-        ds = div_fac * data['dy'].flat[0]
-        f += data["y-velocity"][1:-1,sl_right,1:-1]/ds
-        f -= data["y-velocity"][1:-1,sl_left ,1:-1]/ds
-    if data.pf.dimensionality > 2:
-        ds = div_fac * data['dz'].flat[0]
-        f += data["z-velocity"][1:-1,1:-1,sl_right]/ds
-        f -= data["z-velocity"][1:-1,1:-1,sl_left ]/ds
-    new_field = np.zeros(data["x-velocity"].shape, dtype='float64')
-    new_field[1:-1,1:-1,1:-1] = f
-    return new_field
-def _convertDivV(data):
-    return data.convert("cm")**-1.0
-add_field("DivV", function=_DivV,
-            validators=[ValidateSpatial(1,
-            ["x-velocity","y-velocity","z-velocity"])],
-          units=r"\rm{s}^{-1}", take_log=False,
-          convert_function=_convertDivV)
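
_DivV hand-rolls centered differences with a stencil that depends on HydroMethod (the staggered HydroMethod == 2 case uses one-sided spacing). On a uniform grid the div_fac = 2.0 branch agrees, in the interior, with this np.gradient sketch (illustrative, not the yt implementation):

    import numpy as np

    shape = (16, 16, 16)
    dx = dy = dz = 1.0
    rng = np.random.RandomState(0)
    vx, vy, vz = (rng.standard_normal(shape) for _ in range(3))
    div_v = (np.gradient(vx, dx, axis=0)
             + np.gradient(vy, dy, axis=1)
             + np.gradient(vz, dz, axis=2))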
-
-def _AbsDivV(field, data):
-    return np.abs(data['DivV'])
-add_field("AbsDivV", function=_AbsDivV,
-          units=r"\rm{s}^{-1}")
-
-def _Contours(field, data):
-    return -np.ones_like(data["Ones"])
-add_field("Contours", validators=[ValidateSpatial(0)], take_log=False,
-          display_field=False, function=_Contours)
-add_field("tempContours", function=_Contours,
-          validators=[ValidateSpatial(0), ValidateGridType()],
-          take_log=False, display_field=False)
-
-def obtain_velocities(data):
-    return obtain_rv_vec(data)
-
-def _convertSpecificAngularMomentum(data):
-    return data.convert("cm")
-def _convertSpecificAngularMomentumKMSMPC(data):
-    return km_per_cm*data.convert("mpc")
-
-def _SpecificAngularMomentumX(field, data):
-    xv, yv, zv = obtain_velocities(data)
-    rv = obtain_rvec(data)
-    return yv*rv[2,:] - zv*rv[1,:]
-def _SpecificAngularMomentumY(field, data):
-    xv, yv, zv = obtain_velocities(data)
-    rv = obtain_rvec(data)
-    return -(xv*rv[2,:] - zv*rv[0,:])
-def _SpecificAngularMomentumZ(field, data):
-    xv, yv, zv = obtain_velocities(data)
-    rv = obtain_rvec(data)
-    return xv*rv[1,:] - yv*rv[0,:]
-for ax in 'XYZ':
-    n = "SpecificAngularMomentum%s" % ax
-    add_field(n, function=eval("_%s" % n),
-              convert_function=_convertSpecificAngularMomentum,
-              units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
-
-def _AngularMomentumX(field, data):
-    return data["CellMass"] * data["SpecificAngularMomentumX"]
-add_field("AngularMomentumX", function=_AngularMomentumX,
-         units=r"\rm{g}\/\rm{cm}^2/\rm{s}", vector_field=False,
-         validators=[ValidateParameter('center')])
-def _AngularMomentumY(field, data):
-    return data["CellMass"] * data["SpecificAngularMomentumY"]
-add_field("AngularMomentumY", function=_AngularMomentumY,
-         units=r"\rm{g}\/\rm{cm}^2/\rm{s}", vector_field=False,
-         validators=[ValidateParameter('center')])
-def _AngularMomentumZ(field, data):
-    return data["CellMass"] * data["SpecificAngularMomentumZ"]
-add_field("AngularMomentumZ", function=_AngularMomentumZ,
-         units=r"\rm{g}\/\rm{cm}^2/\rm{s}", vector_field=False,
-         validators=[ValidateParameter('center')])
-
-def _ParticleSpecificAngularMomentum(field, data):
-    """
-    Calculate the specific angular momentum of a particle.  Returns a
-    vector for each particle.
-    """
-    if data.has_field_parameter("bulk_velocity"):
-        bv = data.get_field_parameter("bulk_velocity")
-    else: bv = np.zeros(3, dtype='float64')
-    xv = data["particle_velocity_x"] - bv[0]
-    yv = data["particle_velocity_y"] - bv[1]
-    zv = data["particle_velocity_z"] - bv[2]
-    center = data.get_field_parameter('center')
-    coords = np.array([data['particle_position_x'],
-                       data['particle_position_y'],
-                       data['particle_position_z']], dtype='float64')
-    new_shape = tuple([3] + [1]*(len(coords.shape)-1))
-    r_vec = coords - np.reshape(center,new_shape)
-    v_vec = np.array([xv,yv,zv], dtype='float64')
-    return np.cross(r_vec, v_vec, axis=0)
-#add_field("ParticleSpecificAngularMomentum",
-#          function=_ParticleSpecificAngularMomentum, particle_type=True,
-#          convert_function=_convertSpecificAngularMomentum, vector_field=True,
-#          units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter('center')])
-#add_field("ParticleSpecificAngularMomentumKMSMPC",
-#          function=_ParticleSpecificAngularMomentum, particle_type=True,
-#          convert_function=_convertSpecificAngularMomentumKMSMPC, vector_field=True,
-#          units=r"\rm{km}\rm{Mpc}/\rm{s}", validators=[ValidateParameter('center')])
-
-def _ParticleSpecificAngularMomentumX(field, data):
-    if data.has_field_parameter("bulk_velocity"):
-        bv = data.get_field_parameter("bulk_velocity")
-    else: bv = np.zeros(3, dtype='float64')
-    center = data.get_field_parameter('center')
-    y = data["particle_position_y"] - center[1]
-    z = data["particle_position_z"] - center[2]
-    yv = data["particle_velocity_y"] - bv[1]
-    zv = data["particle_velocity_z"] - bv[2]
-    return yv*z - zv*y
-add_field("ParticleSpecificAngularMomentumX", 
-          function=_ParticleSpecificAngularMomentumX, particle_type=True,
-          convert_function=_convertSpecificAngularMomentum,
-          units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
-add_field("ParticleSpecificAngularMomentumX_KMSMPC", function=_ParticleSpecificAngularMomentumX, particle_type=True,
-          convert_function=_convertSpecificAngularMomentumKMSMPC,
-          units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
-def _ParticleSpecificAngularMomentumY(field, data):
-    if data.has_field_parameter("bulk_velocity"):
-        bv = data.get_field_parameter("bulk_velocity")
-    else: bv = np.zeros(3, dtype='float64')
-    center = data.get_field_parameter('center')
-    x = data["particle_position_x"] - center[0]
-    z = data["particle_position_z"] - center[2]
-    xv = data["particle_velocity_x"] - bv[0]
-    zv = data["particle_velocity_z"] - bv[2]
-    return -(xv*z - zv*x)
-add_field("ParticleSpecificAngularMomentumY", 
-          function=_ParticleSpecificAngularMomentumY, particle_type=True,
-          convert_function=_convertSpecificAngularMomentum,
-          units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
-add_field("ParticleSpecificAngularMomentumY_KMSMPC", function=_ParticleSpecificAngularMomentumY, particle_type=True,
-          convert_function=_convertSpecificAngularMomentumKMSMPC,
-          units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
-def _ParticleSpecificAngularMomentumZ(field, data):
-    if data.has_field_parameter("bulk_velocity"):
-        bv = data.get_field_parameter("bulk_velocity")
-    else: bv = np.zeros(3, dtype='float64')
-    center = data.get_field_parameter('center')
-    x = data["particle_position_x"] - center[0]
-    y = data["particle_position_y"] - center[1]
-    xv = data["particle_velocity_x"] - bv[0]
-    yv = data["particle_velocity_y"] - bv[1]
-    return xv*y - yv*x
-add_field("ParticleSpecificAngularMomentumZ", 
-          function=_ParticleSpecificAngularMomentumZ, particle_type=True,
-          convert_function=_convertSpecificAngularMomentum,
-          units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
-add_field("ParticleSpecificAngularMomentumZ_KMSMPC", function=_ParticleSpecificAngularMomentumZ, particle_type=True,
-          convert_function=_convertSpecificAngularMomentumKMSMPC,
-          units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
-
-def _ParticleAngularMomentum(field, data):
-    return data["ParticleMass"] * data["ParticleSpecificAngularMomentum"]
-#add_field("ParticleAngularMomentum",
-#          function=_ParticleAngularMomentum, units=r"\rm{g}\/\rm{cm}^2/\rm{s}",
-#          particle_type=True, validators=[ValidateParameter('center')])
-def _ParticleAngularMomentumMSUNKMSMPC(field, data):
-    return data["ParticleMass"] * data["ParticleSpecificAngularMomentumKMSMPC"]
-#add_field("ParticleAngularMomentumMSUNKMSMPC",
-#          function=_ParticleAngularMomentumMSUNKMSMPC,
-#          units=r"M_{\odot}\rm{km}\rm{Mpc}/\rm{s}",
-#          particle_type=True, validators=[ValidateParameter('center')])
-
-def _ParticleAngularMomentumX(field, data):
-    return data["particle_mass"] * data["ParticleSpecificAngularMomentumX"]
-add_field("ParticleAngularMomentumX", function=_ParticleAngularMomentumX,
-         units=r"\rm{g}\/\rm{cm}^2/\rm{s}", particle_type=True,
-         validators=[ValidateParameter('center')])
-def _ParticleAngularMomentumY(field, data):
-    return data["particle_mass"] * data["ParticleSpecificAngularMomentumY"]
-add_field("ParticleAngularMomentumY", function=_ParticleAngularMomentumY,
-         units=r"\rm{g}\/\rm{cm}^2/\rm{s}", particle_type=True,
-         validators=[ValidateParameter('center')])
-def _ParticleAngularMomentumZ(field, data):
-    return data["particle_mass"] * data["ParticleSpecificAngularMomentumZ"]
-add_field("ParticleAngularMomentumZ", function=_ParticleAngularMomentumZ,
-         units=r"\rm{g}\/\rm{cm}^2/\rm{s}", particle_type=True,
-         validators=[ValidateParameter('center')])
-
-def get_radius(data, field_prefix):
-    center = data.get_field_parameter("center")
-    DW = data.pf.domain_right_edge - data.pf.domain_left_edge
-    radius = np.zeros(data[field_prefix+"x"].shape, dtype='float64')
-    r = radius.copy()
-    if any(data.pf.periodicity):
-        rdw = radius.copy()
-    for i, ax in enumerate('xyz'):
-        np.subtract(data["%s%s" % (field_prefix, ax)], center[i], r)
-        if data.pf.dimensionality < i+1:
-            break
-        if data.pf.periodicity[i]:
-            np.abs(r, r)
-            np.subtract(r, DW[i], rdw)
-            np.abs(rdw, rdw)
-            np.minimum(r, rdw, r)
-        np.power(r, 2.0, r)
-        np.add(radius, r, radius)
-    np.sqrt(radius, radius)
-    return radius
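
get_radius applies the minimum-image convention on periodic axes: per axis the separation is min(|dx|, DW - |dx|), accumulated in quadrature. A one-axis sketch:

    import numpy as np

    # Minimum-image distance on a periodic axis of width L.
    L = 1.0
    x = np.array([0.05, 0.5, 0.95])
    center = 0.0
    d = np.abs(x - center)
    d = np.minimum(d, L - d)          # -> [0.05, 0.5, 0.05]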
-
-def _ParticleRadius(field, data):
-    return get_radius(data, "particle_position_")
-def _Radius(field, data):
-    return get_radius(data, "")
-
-def _ConvertRadiusCGS(data):
-    return data.convert("cm")
-add_field("ParticleRadius", function=_ParticleRadius,
-          validators=[ValidateParameter("center")],
-          convert_function = _ConvertRadiusCGS, units=r"\rm{cm}",
-          particle_type = True,
-          display_name = "Particle Radius")
-add_field("Radius", function=_Radius,
-          validators=[ValidateParameter("center")],
-          convert_function = _ConvertRadiusCGS, units=r"\rm{cm}")
-
-def _ConvertRadiusMpc(data):
-    return data.convert("mpc")
-add_field("RadiusMpc", function=_Radius,
-          validators=[ValidateParameter("center")],
-          convert_function = _ConvertRadiusMpc, units=r"\rm{Mpc}",
-          display_name = "Radius")
-add_field("ParticleRadiusMpc", function=_ParticleRadius,
-          validators=[ValidateParameter("center")],
-          convert_function = _ConvertRadiusMpc, units=r"\rm{Mpc}",
-          particle_type=True,
-          display_name = "Particle Radius")
-
-def _ConvertRadiuskpc(data):
-    return data.convert("kpc")
-add_field("ParticleRadiuskpc", function=_ParticleRadius,
-          validators=[ValidateParameter("center")],
-          convert_function = _ConvertRadiuskpc, units=r"\rm{kpc}",
-          particle_type=True,
-          display_name = "Particle Radius")
-add_field("Radiuskpc", function=_Radius,
-          validators=[ValidateParameter("center")],
-          convert_function = _ConvertRadiuskpc, units=r"\rm{kpc}",
-          display_name = "Radius")
-
-def _ConvertRadiuskpch(data):
-    return data.convert("kpch")
-add_field("ParticleRadiuskpch", function=_ParticleRadius,
-          validators=[ValidateParameter("center")],
-          convert_function = _ConvertRadiuskpch, units=r"\rm{kpc}/\rm{h}",
-          particle_type=True,
-          display_name = "Particle Radius")
-add_field("Radiuskpch", function=_Radius,
-          validators=[ValidateParameter("center")],
-          convert_function = _ConvertRadiuskpch, units=r"\rm{kpc}/\rm{h}",
-          display_name = "Radius")
-
-def _ConvertRadiuspc(data):
-    return data.convert("pc")
-add_field("ParticleRadiuspc", function=_ParticleRadius,
-          validators=[ValidateParameter("center")],
-          convert_function = _ConvertRadiuspc, units=r"\rm{pc}",
-          particle_type=True,
-          display_name = "Particle Radius")
-add_field("Radiuspc", function=_Radius,
-          validators=[ValidateParameter("center")],
-          convert_function = _ConvertRadiuspc, units=r"\rm{pc}",
-          display_name="Radius")
-
-def _ConvertRadiusAU(data):
-    return data.convert("au")
-add_field("ParticleRadiusAU", function=_ParticleRadius,
-          validators=[ValidateParameter("center")],
-          convert_function = _ConvertRadiusAU, units=r"\rm{AU}",
-          particle_type=True,
-          display_name = "Particle Radius")
-add_field("RadiusAU", function=_Radius,
-          validators=[ValidateParameter("center")],
-          convert_function = _ConvertRadiusAU, units=r"\rm{AU}",
-          display_name = "Radius")
-
-add_field("ParticleRadiusCode", function=_ParticleRadius,
-          validators=[ValidateParameter("center")],
-          particle_type=True,
-          display_name = "Particle Radius (code)")
-add_field("RadiusCode", function=_Radius,
-          validators=[ValidateParameter("center")],
-          display_name = "Radius (code)")
-
-def _RadialVelocity(field, data):
-    normal = data.get_field_parameter("normal")
-    velocities = obtain_rv_vec(data)    
-    theta = data['sph_theta']
-    phi   = data['sph_phi']
-
-    return get_sph_r_component(velocities, theta, phi, normal)
-
-def _RadialVelocityABS(field, data):
-    return np.abs(_RadialVelocity(field, data))
-def _ConvertRadialVelocityKMS(data):
-    return km_per_cm
-add_field("RadialVelocity", function=_RadialVelocity,
-          units=r"\rm{cm}/\rm{s}")
-add_field("RadialVelocityABS", function=_RadialVelocityABS,
-          units=r"\rm{cm}/\rm{s}")
-add_field("RadialVelocityKMS", function=_RadialVelocity,
-          convert_function=_ConvertRadialVelocityKMS, units=r"\rm{km}/\rm{s}")
-add_field("RadialVelocityKMSABS", function=_RadialVelocityABS,
-          convert_function=_ConvertRadialVelocityKMS, units=r"\rm{km}/\rm{s}")
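
RadialVelocity is the projection of the (bulk-subtracted) velocity onto the radial unit vector, v_r = v . rhat, which get_sph_r_component evaluates from theta and phi. The same projection without the spherical helpers (a sketch; shapes follow the (3, N) convention used above):

    import numpy as np

    rng = np.random.RandomState(0)
    pos = rng.uniform(-1.0, 1.0, (3, 5))
    vel = rng.standard_normal((3, 5))
    center = np.zeros(3)
    r = pos - center[:, None]
    rhat = r / np.linalg.norm(r, axis=0)
    v_r = np.sum(vel * rhat, axis=0)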
-
-def _ParticleRadiusSpherical(field, data):
-    normal = data.get_field_parameter('normal')
-    center = data.get_field_parameter('center')
-    bv = data.get_field_parameter("bulk_velocity")
-    pos = "particle_position_%s"
-    pos = np.array([data[pos % ax] for ax in "xyz"])
-    theta = get_sph_theta(pos, center)
-    phi = get_sph_phi(pos, center)
-    pos = pos - np.reshape(center, (3, 1))
-    sphr = get_sph_r_component(pos, theta, phi, normal)
-    return sphr
-
-add_field("ParticleRadiusSpherical", function=_ParticleRadiusSpherical,
-          particle_type=True, units=r"\rm{cm}",
-          validators=[ValidateParameter("normal"), 
-                      ValidateParameter("center")])
-
-def _ParticleThetaSpherical(field, data):
-    normal = data.get_field_parameter('normal')
-    center = data.get_field_parameter('center')
-    bv = data.get_field_parameter("bulk_velocity")
-    pos = "particle_position_%s"
-    pos = np.array([data[pos % ax] for ax in "xyz"])
-    theta = get_sph_theta(pos, center)
-    phi = get_sph_phi(pos, center)
-    pos = pos - np.reshape(center, (3, 1))
-    spht = get_sph_theta_component(pos, theta, phi, normal)
-    return spht
-
-add_field("ParticleThetaSpherical", function=_ParticleThetaSpherical,
-          particle_type=True, units=r"\rm{cm}",
-          validators=[ValidateParameter("normal"), 
-                      ValidateParameter("center")])
-
-def _ParticlePhiSpherical(field, data):
-    normal = data.get_field_parameter('normal')
-    center = data.get_field_parameter('center')
-    bv = data.get_field_parameter("bulk_velocity")
-    pos = "particle_position_%s"
-    pos = np.array([data[pos % ax] for ax in "xyz"])
-    theta = get_sph_theta(pos, center)
-    phi = get_sph_phi(pos, center)
-    pos = pos - np.reshape(center, (3, 1))
-    sphp = get_sph_phi_component(pos, theta, phi, normal)
-    return sphp
-
-add_field("ParticlePhiSpherical", function=_ParticleThetaSpherical,
-          particle_type=True, units=r"\rm{cm}",
-          validators=[ValidateParameter("normal"), 
-                      ValidateParameter("center")])
-
-def _ParticleRadialVelocity(field, data):
-    normal = data.get_field_parameter('normal')
-    center = data.get_field_parameter('center')
-    bv = data.get_field_parameter("bulk_velocity")
-    pos = "particle_position_%s"
-    pos = np.array([data[pos % ax] for ax in "xyz"])
-    vel = "particle_velocity_%s"
-    vel = np.array([data[vel % ax] for ax in "xyz"])
-    theta = get_sph_theta(pos, center)
-    phi = get_sph_phi(pos, center)
-    pos = pos - np.reshape(center, (3, 1))
-    vel = vel - np.reshape(bv, (3, 1))
-    sphr = get_sph_r_component(vel, theta, phi, normal)
-    return sphr
-
-add_field("ParticleRadialVelocity", function=_ParticleRadialVelocity,
-          particle_type=True, units=r"\rm{cm}/\rm{s}",
-          validators=[ValidateParameter("normal"), 
-                      ValidateParameter("center")])
-
-def _ParticleThetaVelocity(field, data):
-    normal = data.get_field_parameter('normal')
-    center = data.get_field_parameter('center')
-    bv = data.get_field_parameter("bulk_velocity")
-    pos = "particle_position_%s"
-    pos = np.array([data[pos % ax] for ax in "xyz"])
-    vel = "particle_velocity_%s"
-    vel = np.array([data[vel % ax] for ax in "xyz"])
-    theta = get_sph_theta(pos, center)
-    phi = get_sph_phi(pos, center)
-    pos = pos - np.reshape(center, (3, 1))
-    vel = vel - np.reshape(bv, (3, 1))
-    spht = get_sph_theta_component(vel, theta, phi, normal)
-    return spht
-
-add_field("ParticleThetaVelocity", function=_ParticleThetaVelocity,
-          particle_type=True, units=r"\rm{cm}/\rm{s}",
-          validators=[ValidateParameter("normal"), 
-                      ValidateParameter("center")])
-
-def _ParticlePhiVelocity(field, data):
-    normal = data.get_field_parameter('normal')
-    center = data.get_field_parameter('center')
-    bv = data.get_field_parameter("bulk_velocity")
-    pos = "particle_position_%s"
-    pos = np.array([data[pos % ax] for ax in "xyz"])
-    vel = "particle_velocity_%s"
-    vel = np.array([data[vel % ax] for ax in "xyz"])
-    theta = get_sph_theta(pos, center)
-    phi = get_sph_phi(pos, center)
-    pos = pos - np.reshape(center, (3, 1))
-    vel = vel - np.reshape(bv, (3, 1))
-    sphp = get_sph_phi_component(vel, phi, normal)
-    return sphp
-
-add_field("ParticlePhiVelocity", function=_ParticleThetaVelocity,
-          particle_type=True, units=r"\rm{cm}/\rm{s}",
-          validators=[ValidateParameter("normal"), 
-                      ValidateParameter("center")])
-
-def _TangentialVelocity(field, data):
-    return np.sqrt(data["VelocityMagnitude"]**2.0
-                 - data["RadialVelocity"]**2.0)
-add_field("TangentialVelocity", 
-          function=_TangentialVelocity,
-          take_log=False, units=r"\rm{cm}/\rm{s}")
-
-def _CuttingPlaneVelocityX(field, data):
-    x_vec, y_vec, z_vec = [data.get_field_parameter("cp_%s_vec" % (ax))
-                           for ax in 'xyz']
-    bulk_velocity = data.get_field_parameter("bulk_velocity")
-    if bulk_velocity is None:
-        bulk_velocity = np.zeros(3)
-    v_vec = np.array([data["%s-velocity" % ax] - bv \
-                for ax, bv in zip('xyz', bulk_velocity)])
-    v_vec = np.rollaxis(v_vec, 0, len(v_vec.shape))
-    return np.sum(x_vec * v_vec, axis=-1)
-add_field("CuttingPlaneVelocityX", 
-          function=_CuttingPlaneVelocityX,
-          validators=[ValidateParameter("cp_%s_vec" % ax)
-                      for ax in 'xyz'], units=r"\rm{km}/\rm{s}")
-def _CuttingPlaneVelocityY(field, data):
-    x_vec, y_vec, z_vec = [data.get_field_parameter("cp_%s_vec" % (ax))
-                           for ax in 'xyz']
-    bulk_velocity = data.get_field_parameter("bulk_velocity")
-    if bulk_velocity is None:
-        bulk_velocity = np.zeros(3)
-    v_vec = np.array([data["%s-velocity" % ax] - bv \
-                for ax, bv in zip('xyz', bulk_velocity)])
-    v_vec = np.rollaxis(v_vec, 0, len(v_vec.shape))
-    return np.sum(y_vec * v_vec, axis=-1)
-add_field("CuttingPlaneVelocityY", 
-          function=_CuttingPlaneVelocityY,
-          validators=[ValidateParameter("cp_%s_vec" % ax)
-                      for ax in 'xyz'], units=r"\rm{km}/\rm{s}")
-
-def _CuttingPlaneBx(field, data):
-    x_vec, y_vec, z_vec = [data.get_field_parameter("cp_%s_vec" % (ax))
-                           for ax in 'xyz']
-    b_vec = np.array([data["B%s" % ax] for ax in 'xyz'])
-    return np.dot(x_vec, b_vec)
-add_field("CuttingPlaneBx", 
-          function=_CuttingPlaneBx,
-          validators=[ValidateParameter("cp_%s_vec" % ax)
-                      for ax in 'xyz'], units=r"\rm{Gauss}")
-def _CuttingPlaneBy(field, data):
-    x_vec, y_vec, z_vec = [data.get_field_parameter("cp_%s_vec" % (ax))
-                           for ax in 'xyz']
-    b_vec = np.array([data["B%s" % ax] for ax in 'xyz'])
-    return np.dot(y_vec, b_vec)
-add_field("CuttingPlaneBy", 
-          function=_CuttingPlaneBy,
-          validators=[ValidateParameter("cp_%s_vec" % ax)
-                      for ax in 'xyz'], units=r"\rm{Gauss}")
-
-def _MeanMolecularWeight(field,data):
-    return (data["Density"] / (mh *data["NumberDensity"]))
-add_field("MeanMolecularWeight",function=_MeanMolecularWeight,units=r"")
-
-def _JeansMassMsun(field,data):
-    MJ_constant = (((5.0 * kboltz) / (G * mh)) ** (1.5)) * \
-    (3.0 / (4.0 * np.pi)) ** (0.5) / mass_sun_cgs
-
-    return (MJ_constant *
-            ((data["Temperature"]/data["MeanMolecularWeight"])**(1.5)) *
-            (data["Density"]**(-0.5)))
-add_field("JeansMassMsun",function=_JeansMassMsun,
-          units=r"\rm{M_{\odot}}")
-
-def _pdensity(field, data):
-    pmass = data[('deposit','all_mass')]
-    np.divide(pmass, data["CellVolume"], pmass)
-    return pmass
-add_field("particle_density", function=_pdensity,
-          validators=[ValidateGridType()],
-          display_name=r"\mathrm{Particle}\/\mathrm{Density}")
-
-def _MagneticEnergy(field,data):
-    """This assumes that your front end has provided Bx, By, Bz in
-    units of Gauss. If you use MKS, make sure to write your own
-    MagneticEnergy field to deal with non-unitary \mu_0.
-    """
-    return (data["Bx"]**2 + data["By"]**2 + data["Bz"]**2)/(8*np.pi)
-add_field("MagneticEnergy",function=_MagneticEnergy,
-          units=r"\rm{ergs}\/\rm{cm}^{-3}",
-          display_name=r"\rm{Magnetic}\/\rm{Energy}")
-
-def _BMagnitude(field,data):
-    """This assumes that your front end has provided Bx, By, Bz in
-    units of Gauss. If you use MKS, make sure to write your own
-    BMagnitude field to deal with non-unitary \mu_0.
-    """
-    return np.sqrt((data["Bx"]**2 + data["By"]**2 + data["Bz"]**2))
-add_field("BMagnitude",
-          function=_BMagnitude,
-          display_name=r"|B|", units=r"\rm{Gauss}")
-
-def _PlasmaBeta(field,data):
-    """This assumes that your front end has provided Bx, By, Bz in
-    units of Gauss. If you use MKS, make sure to write your own
-    PlasmaBeta field to deal with non-unitary \mu_0.
-    """
-    return data['Pressure']/data['MagneticEnergy']
-add_field("PlasmaBeta",
-          function=_PlasmaBeta,
-          display_name=r"\rm{Plasma}\/\beta", units="")
-
-def _MagneticPressure(field,data):
-    return data['MagneticEnergy']
-add_field("MagneticPressure",
-          function=_MagneticPressure,
-          display_name=r"\rm{Magnetic}\/\rm{Pressure}",
-          units=r"\rm{ergs}\/\rm{cm}^{-3}")
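
In Gaussian units the three fields above reduce to u_B = B^2 / 8 pi (the energy density, equal to the magnetic pressure) and beta = P / u_B. A self-contained sketch with illustrative values:

    import numpy as np

    Bx, By, Bz = 1.0e-6, 2.0e-6, 2.0e-6     # Gauss
    pressure = 1.0e-12                      # dyne/cm^2
    u_B = (Bx**2 + By**2 + Bz**2) / (8.0 * np.pi)
    beta = pressure / u_B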
-
-def _BPoloidal(field,data):
-    normal = data.get_field_parameter("normal")
-
-    Bfields = np.array([data['Bx'], data['By'], data['Bz']])
-
-    theta = data['sph_theta']
-    phi   = data['sph_phi']
-
-    return get_sph_theta_component(Bfields, theta, phi, normal)
-
-add_field("BPoloidal", function=_BPoloidal,
-          units=r"\rm{Gauss}",
-          validators=[ValidateParameter("normal")])
-
-def _BToroidal(field,data):
-    normal = data.get_field_parameter("normal")
-
-    Bfields = np.array([data['Bx'], data['By'], data['Bz']])
-
-    phi   = data['sph_phi']
-
-    return get_sph_phi_component(Bfields, phi, normal)
-
-add_field("BToroidal", function=_BToroidal,
-          units=r"\rm{Gauss}",
-          validators=[ValidateParameter("normal")])
-
-def _BRadial(field,data):
-    normal = data.get_field_parameter("normal")
-
-    Bfields = np.array([data['Bx'], data['By'], data['Bz']])
-
-    theta = data['sph_theta']
-    phi   = data['sph_phi']
-
-    return get_sph_r_component(Bfields, theta, phi, normal)
-
-add_field("BRadial", function=_BRadial,
-          units=r"\rm{Gauss}",
-          validators=[ValidateParameter("normal")])
-
-def _VorticitySquared(field, data):
-    mylog.debug("Generating vorticity on %s", data)
-    # We need to set up stencils
-    if data.pf["HydroMethod"] == 2:
-        sl_left = slice(None,-2,None)
-        sl_right = slice(1,-1,None)
-        div_fac = 1.0
-    else:
-        sl_left = slice(None,-2,None)
-        sl_right = slice(2,None,None)
-        div_fac = 2.0
-    new_field = np.zeros(data["x-velocity"].shape)
-    dvzdy = (data["z-velocity"][1:-1,sl_right,1:-1] -
-             data["z-velocity"][1:-1,sl_left,1:-1]) \
-             / (div_fac*data["dy"].flat[0])
-    dvydz = (data["y-velocity"][1:-1,1:-1,sl_right] -
-             data["y-velocity"][1:-1,1:-1,sl_left]) \
-             / (div_fac*data["dz"].flat[0])
-    new_field[1:-1,1:-1,1:-1] += (dvzdy - dvydz)**2.0
-    del dvzdy, dvydz
-    dvxdz = (data["x-velocity"][1:-1,1:-1,sl_right] -
-             data["x-velocity"][1:-1,1:-1,sl_left]) \
-             / (div_fac*data["dz"].flat[0])
-    dvzdx = (data["z-velocity"][sl_right,1:-1,1:-1] -
-             data["z-velocity"][sl_left,1:-1,1:-1]) \
-             / (div_fac*data["dx"].flat[0])
-    new_field[1:-1,1:-1,1:-1] += (dvxdz - dvzdx)**2.0
-    del dvxdz, dvzdx
-    dvydx = (data["y-velocity"][sl_right,1:-1,1:-1] -
-             data["y-velocity"][sl_left,1:-1,1:-1]) \
-             / (div_fac*data["dx"].flat[0])
-    dvxdy = (data["x-velocity"][1:-1,sl_right,1:-1] -
-             data["x-velocity"][1:-1,sl_left,1:-1]) \
-             / (div_fac*data["dy"].flat[0])
-    new_field[1:-1,1:-1,1:-1] += (dvydx - dvxdy)**2.0
-    del dvydx, dvxdy
-    new_field = np.abs(new_field)
-    return new_field
-def _convertVorticitySquared(data):
-    return data.convert("cm")**-2.0
-add_field("VorticitySquared", function=_VorticitySquared,
-          validators=[ValidateSpatial(1,
-              ["x-velocity","y-velocity","z-velocity"])],
-          units=r"\rm{s}^{-2}",
-          convert_function=_convertVorticitySquared)
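
_VorticitySquared assembles |curl v|^2 from six centered differences. On a uniform grid the div_fac = 2.0 branch agrees, in the interior, with this compact np.gradient sketch:

    import numpy as np

    shape = (16, 16, 16)
    dx = dy = dz = 1.0
    rng = np.random.RandomState(1)
    vx, vy, vz = (rng.standard_normal(shape) for _ in range(3))
    wx = np.gradient(vz, dy, axis=1) - np.gradient(vy, dz, axis=2)
    wy = np.gradient(vx, dz, axis=2) - np.gradient(vz, dx, axis=0)
    wz = np.gradient(vy, dx, axis=0) - np.gradient(vx, dy, axis=1)
    vort_sq = wx**2 + wy**2 + wz**2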
-
-def _gradPressureX(field, data):
-    # We need to set up stencils
-    if data.pf["HydroMethod"] == 2:
-        sl_left = slice(None,-2,None)
-        sl_right = slice(1,-1,None)
-        div_fac = 1.0
-    else:
-        sl_left = slice(None,-2,None)
-        sl_right = slice(2,None,None)
-        div_fac = 2.0
-    new_field = np.zeros(data["Pressure"].shape, dtype='float64')
-    ds = div_fac * data['dx'].flat[0]
-    new_field[1:-1,1:-1,1:-1]  = data["Pressure"][sl_right,1:-1,1:-1]/ds
-    new_field[1:-1,1:-1,1:-1] -= data["Pressure"][sl_left ,1:-1,1:-1]/ds
-    return new_field
-def _gradPressureY(field, data):
-    # We need to set up stencils
-    if data.pf["HydroMethod"] == 2:
-        sl_left = slice(None,-2,None)
-        sl_right = slice(1,-1,None)
-        div_fac = 1.0
-    else:
-        sl_left = slice(None,-2,None)
-        sl_right = slice(2,None,None)
-        div_fac = 2.0
-    new_field = np.zeros(data["Pressure"].shape, dtype='float64')
-    ds = div_fac * data['dy'].flat[0]
-    new_field[1:-1,1:-1,1:-1]  = data["Pressure"][1:-1,sl_right,1:-1]/ds
-    new_field[1:-1,1:-1,1:-1] -= data["Pressure"][1:-1,sl_left ,1:-1]/ds
-    return new_field
-def _gradPressureZ(field, data):
-    # We need to set up stencils
-    if data.pf["HydroMethod"] == 2:
-        sl_left = slice(None,-2,None)
-        sl_right = slice(1,-1,None)
-        div_fac = 1.0
-    else:
-        sl_left = slice(None,-2,None)
-        sl_right = slice(2,None,None)
-        div_fac = 2.0
-    new_field = np.zeros(data["Pressure"].shape, dtype='float64')
-    ds = div_fac * data['dz'].flat[0]
-    new_field[1:-1,1:-1,1:-1]  = data["Pressure"][1:-1,1:-1,sl_right]/ds
-    new_field[1:-1,1:-1,1:-1] -= data["Pressure"][1:-1,1:-1,sl_left ]/ds
-    return new_field
-def _convertgradPressure(data):
-    return 1.0/data.convert("cm")
-for ax in 'XYZ':
-    n = "gradPressure%s" % ax
-    add_field(n, function=eval("_%s" % n),
-              convert_function=_convertgradPressure,
-              validators=[ValidateSpatial(1, ["Pressure"])],
-              units=r"\rm{dyne}/\rm{cm}^{3}")
-
-def _gradPressureMagnitude(field, data):
-    return np.sqrt(data["gradPressureX"]**2 +
-                   data["gradPressureY"]**2 +
-                   data["gradPressureZ"]**2)
-add_field("gradPressureMagnitude", function=_gradPressureMagnitude,
-          validators=[ValidateSpatial(1, ["Pressure"])],
-          units=r"\rm{dyne}/\rm{cm}^{3}")
-
-def _gradDensityX(field, data):
-    # We need to set up stencils
-    if data.pf["HydroMethod"] == 2:
-        sl_left = slice(None,-2,None)
-        sl_right = slice(1,-1,None)
-        div_fac = 1.0
-    else:
-        sl_left = slice(None,-2,None)
-        sl_right = slice(2,None,None)
-        div_fac = 2.0
-    new_field = np.zeros(data["Density"].shape, dtype='float64')
-    ds = div_fac * data['dx'].flat[0]
-    new_field[1:-1,1:-1,1:-1]  = data["Density"][sl_right,1:-1,1:-1]/ds
-    new_field[1:-1,1:-1,1:-1] -= data["Density"][sl_left ,1:-1,1:-1]/ds
-    return new_field
-def _gradDensityY(field, data):
-    # We need to set up stencils
-    if data.pf["HydroMethod"] == 2:
-        sl_left = slice(None,-2,None)
-        sl_right = slice(1,-1,None)
-        div_fac = 1.0
-    else:
-        sl_left = slice(None,-2,None)
-        sl_right = slice(2,None,None)
-        div_fac = 2.0
-    new_field = np.zeros(data["Density"].shape, dtype='float64')
-    ds = div_fac * data['dy'].flat[0]
-    new_field[1:-1,1:-1,1:-1]  = data["Density"][1:-1,sl_right,1:-1]/ds
-    new_field[1:-1,1:-1,1:-1] -= data["Density"][1:-1,sl_left ,1:-1]/ds
-    return new_field
-def _gradDensityZ(field, data):
-    # We need to set up stencils
-    if data.pf["HydroMethod"] == 2:
-        sl_left = slice(None,-2,None)
-        sl_right = slice(1,-1,None)
-        div_fac = 1.0
-    else:
-        sl_left = slice(None,-2,None)
-        sl_right = slice(2,None,None)
-        div_fac = 2.0
-    new_field = np.zeros(data["Density"].shape, dtype='float64')
-    ds = div_fac * data['dz'].flat[0]
-    new_field[1:-1,1:-1,1:-1]  = data["Density"][1:-1,1:-1,sl_right]/ds
-    new_field[1:-1,1:-1,1:-1] -= data["Density"][1:-1,1:-1,sl_left ]/ds
-    return new_field
-def _convertgradDensity(data):
-    return 1.0/data.convert("cm")
-for ax in 'XYZ':
-    n = "gradDensity%s" % ax
-    add_field(n, function=eval("_%s" % n),
-              convert_function=_convertgradDensity,
-              validators=[ValidateSpatial(1, ["Density"])],
-              units=r"\rm{g}/\rm{cm}^{4}")
-
-def _gradDensityMagnitude(field, data):
-    return np.sqrt(data["gradDensityX"]**2 +
-                   data["gradDensityY"]**2 +
-                   data["gradDensityZ"]**2)
-add_field("gradDensityMagnitude", function=_gradDensityMagnitude,
-          validators=[ValidateSpatial(1, ["Density"])],
-          units=r"\rm{g}/\rm{cm}^{4}")
-
-def _BaroclinicVorticityX(field, data):
-    rho2 = data["Density"].astype('float64')**2
-    return (data["gradPressureY"] * data["gradDensityZ"] -
-            data["gradPressureZ"] * data["gradDensityY"]) / rho2
-def _BaroclinicVorticityY(field, data):
-    rho2 = data["Density"].astype('float64')**2
-    return (data["gradPressureZ"] * data["gradDensityX"] -
-            data["gradPressureX"] * data["gradDensityZ"]) / rho2
-def _BaroclinicVorticityZ(field, data):
-    rho2 = data["Density"].astype('float64')**2
-    return (data["gradPressureX"] * data["gradDensityY"] -
-            data["gradPressureY"] * data["gradDensityX"]) / rho2
-for ax in 'XYZ':
-    n = "BaroclinicVorticity%s" % ax
-    add_field(n, function=eval("_%s" % n),
-          validators=[ValidateSpatial(1, ["Density", "Pressure"])],
-          units=r"\rm{s}^{-1}")
-
-def _BaroclinicVorticityMagnitude(field, data):
-    return np.sqrt(data["BaroclinicVorticityX"]**2 +
-                   data["BaroclinicVorticityY"]**2 +
-                   data["BaroclinicVorticityZ"]**2)
-add_field("BaroclinicVorticityMagnitude",
-          function=_BaroclinicVorticityMagnitude,
-          validators=[ValidateSpatial(1, ["Density", "Pressure"])],
-          units=r"\rm{s}^{-1}")
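
For reference, the components above implement, as written in the code,

    \vec{B}_{\rm baroclinic} = \frac{\nabla P \times \nabla \rho}{\rho^{2}}

which is the baroclinic source term of the vorticity equation, (grad rho x grad P) / rho^2, up to overall sign convention.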
-
-def _VorticityX(field, data):
-    # We need to set up stencils
-    if data.pf["HydroMethod"] == 2:
-        sl_left = slice(None,-2,None)
-        sl_right = slice(1,-1,None)
-        div_fac = 1.0
-    else:
-        sl_left = slice(None,-2,None)
-        sl_right = slice(2,None,None)
-        div_fac = 2.0
-    new_field = np.zeros(data["z-velocity"].shape, dtype='float64')
-    new_field[1:-1,1:-1,1:-1] = (data["z-velocity"][1:-1,sl_right,1:-1] -
-                                 data["z-velocity"][1:-1,sl_left,1:-1]) \
-                                 / (div_fac*data["dy"].flat[0])
-    new_field[1:-1,1:-1,1:-1] -= (data["y-velocity"][1:-1,1:-1,sl_right] -
-                                  data["y-velocity"][1:-1,1:-1,sl_left]) \
-                                  / (div_fac*data["dz"].flat[0])
-    return new_field
-def _VorticityY(field, data):
-    # We need to set up stencils
-    if data.pf["HydroMethod"] == 2:
-        sl_left = slice(None,-2,None)
-        sl_right = slice(1,-1,None)
-        div_fac = 1.0
-    else:
-        sl_left = slice(None,-2,None)
-        sl_right = slice(2,None,None)
-        div_fac = 2.0
-    new_field = np.zeros(data["z-velocity"].shape, dtype='float64')
-    new_field[1:-1,1:-1,1:-1] = (data["x-velocity"][1:-1,1:-1,sl_right] -
-                                 data["x-velocity"][1:-1,1:-1,sl_left]) \
-                                 / (div_fac*data["dz"].flat[0])
-    new_field[1:-1,1:-1,1:-1] -= (data["z-velocity"][sl_right,1:-1,1:-1] -
-                                  data["z-velocity"][sl_left,1:-1,1:-1]) \
-                                  / (div_fac*data["dx"].flat[0])
-    return new_field
-def _VorticityZ(field, data):
-    # We need to set up stencils
-    if data.pf["HydroMethod"] == 2:
-        sl_left = slice(None,-2,None)
-        sl_right = slice(1,-1,None)
-        div_fac = 1.0
-    else:
-        sl_left = slice(None,-2,None)
-        sl_right = slice(2,None,None)
-        div_fac = 2.0
-    new_field = np.zeros(data["x-velocity"].shape, dtype='float64')
-    new_field[1:-1,1:-1,1:-1] = (data["y-velocity"][sl_right,1:-1,1:-1] -
-                                 data["y-velocity"][sl_left,1:-1,1:-1]) \
-                                 / (div_fac*data["dx"].flat[0])
-    new_field[1:-1,1:-1,1:-1] -= (data["x-velocity"][1:-1,sl_right,1:-1] -
-                                  data["x-velocity"][1:-1,sl_left,1:-1]) \
-                                  / (div_fac*data["dy"].flat[0])
-    return new_field
-def _convertVorticity(data):
-    return 1.0/data.convert("cm")
-for ax in 'XYZ':
-    n = "Vorticity%s" % ax
-    add_field(n, function=eval("_%s" % n),
-              convert_function=_convertVorticity,
-              validators=[ValidateSpatial(1, 
-                          ["x-velocity", "y-velocity", "z-velocity"])],
-              units=r"\rm{s}^{-1}")
-
-def _VorticityMagnitude(field, data):
-    return np.sqrt(data["VorticityX"]**2 +
-                   data["VorticityY"]**2 +
-                   data["VorticityZ"]**2)
-add_field("VorticityMagnitude", function=_VorticityMagnitude,
-          validators=[ValidateSpatial(1, 
-                      ["x-velocity", "y-velocity", "z-velocity"])],
-          units=r"\rm{s}^{-1}")
-
-def _VorticityStretchingX(field, data):
-    return data["DivV"] * data["VorticityX"]
-def _VorticityStretchingY(field, data):
-    return data["DivV"] * data["VorticityY"]
-def _VorticityStretchingZ(field, data):
-    return data["DivV"] * data["VorticityZ"]
-for ax in 'XYZ':
-    n = "VorticityStretching%s" % ax
-    add_field(n, function=eval("_%s" % n),
-              validators=[ValidateSpatial(0)])
-def _VorticityStretchingMagnitude(field, data):
-    return np.sqrt(data["VorticityStretchingX"]**2 +
-                   data["VorticityStretchingY"]**2 +
-                   data["VorticityStretchingZ"]**2)
-add_field("VorticityStretchingMagnitude", 
-          function=_VorticityStretchingMagnitude,
-          validators=[ValidateSpatial(1, 
-                      ["x-velocity", "y-velocity", "z-velocity"])],
-          units=r"\rm{s}^{-1}")
-
-def _VorticityGrowthX(field, data):
-    return -data["VorticityStretchingX"] - data["BaroclinicVorticityX"]
-def _VorticityGrowthY(field, data):
-    return -data["VorticityStretchingY"] - data["BaroclinicVorticityY"]
-def _VorticityGrowthZ(field, data):
-    return -data["VorticityStretchingZ"] - data["BaroclinicVorticityZ"]
-for ax in 'XYZ':
-    n = "VorticityGrowth%s" % ax
-    add_field(n, function=eval("_%s" % n),
-              validators=[ValidateSpatial(1, 
-                          ["x-velocity", "y-velocity", "z-velocity"])],
-              units=r"\rm{s}^{-2}")
-def _VorticityGrowthMagnitude(field, data):
-    result = np.sqrt(data["VorticityGrowthX"]**2 +
-                     data["VorticityGrowthY"]**2 +
-                     data["VorticityGrowthZ"]**2)
-    dot = np.zeros(result.shape)
-    for ax in "XYZ":
-        dot += data["Vorticity%s" % ax] * data["VorticityGrowth%s" % ax]
-    result = np.sign(dot) * result
-    return result
-add_field("VorticityGrowthMagnitude", function=_VorticityGrowthMagnitude,
-          validators=[ValidateSpatial(1, 
-                      ["x-velocity", "y-velocity", "z-velocity"])],
-          units=r"\rm{s}^{-1}",
-          take_log=False)
-def _VorticityGrowthMagnitudeABS(field, data):
-    return np.sqrt(data["VorticityGrowthX"]**2 +
-                   data["VorticityGrowthY"]**2 +
-                   data["VorticityGrowthZ"]**2)
-add_field("VorticityGrowthMagnitudeABS", function=_VorticityGrowthMagnitudeABS,
-          validators=[ValidateSpatial(1, 
-                      ["x-velocity", "y-velocity", "z-velocity"])],
-          units=r"\rm{s}^{-1}")
-
-def _VorticityGrowthTimescale(field, data):
-    domegax_dt = data["VorticityX"] / data["VorticityGrowthX"]
-    domegay_dt = data["VorticityY"] / data["VorticityGrowthY"]
-    domegaz_dt = data["VorticityZ"] / data["VorticityGrowthZ"]
-    return np.sqrt(domegax_dt**2 + domegay_dt**2 + domegaz_dt**2)
-add_field("VorticityGrowthTimescale", function=_VorticityGrowthTimescale,
-          validators=[ValidateSpatial(1, 
-                      ["x-velocity", "y-velocity", "z-velocity"])],
-          units=r"\rm{s}")
-
-########################################################################
-# With radiation pressure
-########################################################################
-
-def _VorticityRadPressureX(field, data):
-    rho = data["Density"].astype('float64')
-    return (data["RadAccel2"] * data["gradDensityZ"] -
-            data["RadAccel3"] * data["gradDensityY"]) / rho
-def _VorticityRadPressureY(field, data):
-    rho = data["Density"].astype('float64')
-    return (data["RadAccel3"] * data["gradDensityX"] -
-            data["RadAccel1"] * data["gradDensityZ"]) / rho
-def _VorticityRadPressureZ(field, data):
-    rho = data["Density"].astype('float64')
-    return (data["RadAccel1"] * data["gradDensityY"] -
-            data["RadAccel2"] * data["gradDensityX"]) / rho
-def _convertRadAccel(data):
-    return data.convert("x-velocity")/data.convert("Time")
-for ax in 'XYZ':
-    n = "VorticityRadPressure%s" % ax
-    add_field(n, function=eval("_%s" % n),
-              convert_function=_convertRadAccel,
-              validators=[ValidateSpatial(1, 
-                   ["Density", "RadAccel1", "RadAccel2", "RadAccel3"])],
-              units=r"\rm{s}^{-1}")
-
-def _VorticityRadPressureMagnitude(field, data):
-    return np.sqrt(data["VorticityRadPressureX"]**2 +
-                   data["VorticityRadPressureY"]**2 +
-                   data["VorticityRadPressureZ"]**2)
-add_field("VorticityRadPressureMagnitude",
-          function=_VorticityRadPressureMagnitude,
-          validators=[ValidateSpatial(1, 
-                      ["Density", "RadAccel1", "RadAccel2", "RadAccel3"])],
-          units=r"\rm{s}^{-1}")
-
-def _VorticityRPGrowthX(field, data):
-    return -data["VorticityStretchingX"] - data["BaroclinicVorticityX"] \
-           -data["VorticityRadPressureX"]
-def _VorticityRPGrowthY(field, data):
-    return -data["VorticityStretchingY"] - data["BaroclinicVorticityY"] \
-           -data["VorticityRadPressureY"]
-def _VorticityRPGrowthZ(field, data):
-    return -data["VorticityStretchingZ"] - data["BaroclinicVorticityZ"] \
-           -data["VorticityRadPressureZ"]
-for ax in 'XYZ':
-    n = "VorticityRPGrowth%s" % ax
-    add_field(n, function=eval("_%s" % n),
-              validators=[ValidateSpatial(1, 
-                       ["Density", "RadAccel1", "RadAccel2", "RadAccel3"])],
-              units=r"\rm{s}^{-1}")
-def _VorticityRPGrowthMagnitude(field, data):
-    result = np.sqrt(data["VorticityRPGrowthX"]**2 +
-                     data["VorticityRPGrowthY"]**2 +
-                     data["VorticityRPGrowthZ"]**2)
-    dot = np.zeros(result.shape)
-    for ax in "XYZ":
-        dot += data["Vorticity%s" % ax] * data["VorticityGrowth%s" % ax]
-    result = np.sign(dot) * result
-    return result
-add_field("VorticityRPGrowthMagnitude", function=_VorticityGrowthMagnitude,
-          validators=[ValidateSpatial(1, 
-                      ["Density", "RadAccel1", "RadAccel2", "RadAccel3"])],
-          units=r"\rm{s}^{-1}",
-          take_log=False)
-def _VorticityRPGrowthMagnitudeABS(field, data):
-    return np.sqrt(data["VorticityRPGrowthX"]**2 +
-                   data["VorticityRPGrowthY"]**2 +
-                   data["VorticityRPGrowthZ"]**2)
-add_field("VorticityRPGrowthMagnitudeABS", 
-          function=_VorticityRPGrowthMagnitudeABS,
-          validators=[ValidateSpatial(1, 
-                      ["Density", "RadAccel1", "RadAccel2", "RadAccel3"])],
-          units=r"\rm{s}^{-1}")
-
-def _VorticityRPGrowthTimescale(field, data):
-    domegax_dt = data["VorticityX"] / data["VorticityRPGrowthX"]
-    domegay_dt = data["VorticityY"] / data["VorticityRPGrowthY"]
-    domegaz_dt = data["VorticityZ"] / data["VorticityRPGrowthZ"]
-    return np.sqrt(domegax_dt**2 + domegay_dt**2 + domegaz_dt**2)
-add_field("VorticityRPGrowthTimescale", function=_VorticityRPGrowthTimescale,
-          validators=[ValidateSpatial(1, 
-                      ["Density", "RadAccel1", "RadAccel2", "RadAccel3"])],
-          units=r"\rm{s}^{-1}")

This diff is so big that we needed to truncate the remainder.
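Every gradient and vorticity field above repeats the same stencil setup: when data.pf["HydroMethod"] == 2 (the staggered Zeus-type solver in Enzo) the difference is taken over a single cell width (div_fac = 1.0), otherwise a centered difference is taken across the two neighboring cells (div_fac = 2.0). The result is written into the interior of a zero-initialized array, which is why each field carries ValidateSpatial(1) to guarantee one ghost zone. A minimal sketch of the centered branch, with a hypothetical centered_gradient helper:

    import numpy as np

    def centered_gradient(arr, d, axis):
        # Sketch only: centered difference along one axis of a 3D cell
        # array, matching the sl_left/sl_right slices above; the one-cell
        # boundary layer is left at zero.
        grad = np.zeros(arr.shape, dtype="float64")
        inner = [slice(1, -1)] * 3
        left, right = list(inner), list(inner)
        left[axis] = slice(None, -2)
        right[axis] = slice(2, None)
        grad[tuple(inner)] = (arr[tuple(right)] - arr[tuple(left)]) / (2.0 * d)
        return grad

The BaroclinicVorticity components assembled from these gradients are, up to sign convention, the baroclinic source term (\nabla P \times \nabla \rho)/\rho^2 of the vorticity equation, which vanishes wherever the pressure and density gradients are aligned.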

https://bitbucket.org/yt_analysis/yt-3.0/commits/949e4792c577/
Changeset:   949e4792c577
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-10 17:47:22
Summary:     Moving particle fields into closures.
Affected #:  3 files

diff -r 785be13d2f5a3c4e54bd6866c949e901068cffee -r 949e4792c5776a0ddfa00ffd722f9ca366375a4c yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -31,6 +31,9 @@
     mass_hydrogen_cgs, \
     mass_sun_cgs, \
     mh
+from .universal_fields import \
+    get_radius, \
+    _get_conv
 
 def _field_concat(fname):
     def _AllFields(field, data):
@@ -153,7 +156,6 @@
 
     return list(set(registry.keys()).difference(orig))
 
-
 def particle_scalar_functions(ptype, coord_name, vel_name, registry):
 
     # Now we have to set up the various velocity and coordinate things.  In the
@@ -194,3 +196,254 @@
                        function=_get_vec_func(ptype, vel_names),
                        particle_type=True)
     return list(set(registry.keys()).difference(orig))
+
+def standard_particle_fields(registry, ptype,
+                             spos = "particle_position_%s",
+                             svel = "particle_velocity_%s"):
+    # This function will set things up based on the scalar fields and standard
+    # yt field names.
+    
+    def _ParticleVelocityMagnitude(field, data):
+        bulk_velocity = data.get_field_parameter("bulk_velocity")
+        if bulk_velocity is None:
+            bulk_velocity = np.zeros(3)
+        return ( (data[ptype, svel % 'x']-bulk_velocity[0])**2.0 + \
+                 (data[ptype, svel % 'y']-bulk_velocity[1])**2.0 + \
+                 (data[ptype, svel % 'z']-bulk_velocity[2])**2.0 )**(1.0/2.0)
+    registry.add_field((ptype, "ParticleVelocityMagnitude"),
+              function=_ParticleVelocityMagnitude,
+              particle_type=True, 
+              take_log=False, units=r"\rm{cm}/\rm{s}")
+
+    # Angular momentum
+    def _ParticleSpecificAngularMomentumX(field, data):
+        if data.has_field_parameter("bulk_velocity"):
+            bv = data.get_field_parameter("bulk_velocity")
+        else: bv = np.zeros(3, dtype='float64')
+        center = data.get_field_parameter('center')
+        y = data[ptype, spos % 'y'] - center[1]
+        z = data[ptype, spos % 'z'] - center[2]
+        yv = data[ptype, svel % 'y'] - bv[1]
+        zv = data[ptype, svel % 'z'] - bv[2]
+        return yv*z - zv*y
+    registry.add_field((ptype, "ParticleSpecificAngularMomentumX"), 
+              function=_ParticleSpecificAngularMomentumX,
+              particle_type=True,
+              convert_function=_get_conv("cm"),
+              units=r"\rm{cm}^2/\rm{s}",
+              validators=[ValidateParameter("center")])
+    def _ParticleSpecificAngularMomentumY(field, data):
+        if data.has_field_parameter("bulk_velocity"):
+            bv = data.get_field_parameter("bulk_velocity")
+        else: bv = np.zeros(3, dtype='float64')
+        center = data.get_field_parameter('center')
+        x = data[ptype, spos % 'x'] - center[0]
+        z = data[ptype, spos % 'z'] - center[2]
+        xv = data[ptype, svel % 'x'] - bv[0]
+        zv = data[ptype, svel % 'z'] - bv[2]
+        return -(xv*z - zv*x)
+    registry.add_field((ptype, "ParticleSpecificAngularMomentumY"), 
+              function=_ParticleSpecificAngularMomentumY,
+              particle_type=True,
+              convert_function=_get_conv("cm"),
+              units=r"\rm{cm}^2/\rm{s}",
+              validators=[ValidateParameter("center")])
+    def _ParticleSpecificAngularMomentumZ(field, data):
+        if data.has_field_parameter("bulk_velocity"):
+            bv = data.get_field_parameter("bulk_velocity")
+        else: bv = np.zeros(3, dtype='float64')
+        center = data.get_field_parameter('center')
+        x = data[ptype, spos % 'x'] - center[0]
+        y = data[ptype, spos % 'y'] - center[1]
+        xv = data[ptype, svel % 'x'] - bv[0]
+        yv = data[ptype, svel % 'y'] - bv[1]
+        return xv*y - yv*x
+    registry.add_field((ptype, "ParticleSpecificAngularMomentumZ"), 
+              function=_ParticleSpecificAngularMomentumZ,
+              particle_type=True,
+              convert_function=_get_conv("cm"),
+              units=r"\rm{cm}^2/\rm{s}",
+              validators=[ValidateParameter("center")])
+
+    def _ParticleAngularMomentumX(field, data):
+        return data[ptype, "particle_mass"] * \
+            data[ptype, "ParticleSpecificAngularMomentumX"]
+    registry.add_field((ptype, "ParticleAngularMomentumX"),
+             function=_ParticleAngularMomentumX,
+             units=r"\rm{g}\/\rm{cm}^2/\rm{s}", particle_type=True,
+             validators=[ValidateParameter('center')])
+    def _ParticleAngularMomentumY(field, data):
+        return data[ptype, "particle_mass"] * \
+            data[ptype, "ParticleSpecificAngularMomentumY"]
+    registry.add_field((ptype, "ParticleAngularMomentumY"),
+             function=_ParticleAngularMomentumY,
+             units=r"\rm{g}\/\rm{cm}^2/\rm{s}", particle_type=True,
+             validators=[ValidateParameter('center')])
+    def _ParticleAngularMomentumZ(field, data):
+        return data[ptype, "particle_mass"] * \
+            data[ptype, "ParticleSpecificAngularMomentumZ"]
+    registry.add_field((ptype, "ParticleAngularMomentumZ"),
+             function=_ParticleAngularMomentumZ,
+             units=r"\rm{g}\/\rm{cm}^2/\rm{s}", particle_type=True,
+             validators=[ValidateParameter('center')])
+
+    def _ParticleRadius(field, data):
+        return get_radius(data, "particle_position_")
+
+    registry.add_field((ptype, "ParticleRadius"),
+              function=_ParticleRadius,
+              validators=[ValidateParameter("center")],
+              convert_function = _get_conv("cm"), units=r"\rm{cm}",
+              particle_type = True,
+              display_name = "Particle Radius")
+    registry.add_field((ptype, "ParticleRadiusMpc"),
+              function=_ParticleRadius,
+              validators=[ValidateParameter("center")],
+              convert_function = _get_conv("mpc"), units=r"\rm{Mpc}",
+              particle_type=True,
+              display_name = "Particle Radius")
+    registry.add_field((ptype, "ParticleRadiuskpc"),
+              function=_ParticleRadius,
+              validators=[ValidateParameter("center")],
+              convert_function = _get_conv("kpc"), units=r"\rm{kpc}",
+              particle_type=True,
+              display_name = "Particle Radius")
+    registry.add_field((ptype, "ParticleRadiuskpch"),
+              function=_ParticleRadius,
+              validators=[ValidateParameter("center")],
+              convert_function = _get_conv("kpch"), units=r"\rm{kpc}/\rm{h}",
+              particle_type=True,
+              display_name = "Particle Radius")
+    registry.add_field((ptype, "ParticleRadiuspc"),
+              function=_ParticleRadius,
+              validators=[ValidateParameter("center")],
+              convert_function = _get_conv("pc"), units=r"\rm{pc}",
+              particle_type=True,
+              display_name = "Particle Radius")
+    registry.add_field((ptype, "ParticleRadiusAU"),
+              function=_ParticleRadius,
+              validators=[ValidateParameter("center")],
+              convert_function = _get_conv("au"), units=r"\rm{AU}",
+              particle_type=True,
+              display_name = "Particle Radius")
+    registry.add_field((ptype, "ParticleRadiusCode"),
+              function=_ParticleRadius,
+              validators=[ValidateParameter("center")],
+              particle_type=True,
+              display_name = "Particle Radius (code)")
+    def _ParticleRadiusSpherical(field, data):
+        normal = data.get_field_parameter('normal')
+        center = data.get_field_parameter('center')
+        bv = data.get_field_parameter("bulk_velocity")
+        pos = spos
+        pos = np.array([data[ptype, pos % ax] for ax in "xyz"])
+        theta = get_sph_theta(pos, center)
+        phi = get_sph_phi(pos, center)
+        pos = pos - np.reshape(center, (3, 1))
+        sphr = get_sph_r_component(pos, theta, phi, normal)
+        return sphr
+
+    registry.add_field((ptype, "ParticleRadiusSpherical"),
+              function=_ParticleRadiusSpherical,
+              particle_type=True, units=r"\rm{cm}",
+              validators=[ValidateParameter("normal"), 
+                          ValidateParameter("center")])
+
+    def _ParticleThetaSpherical(field, data):
+        normal = data.get_field_parameter('normal')
+        center = data.get_field_parameter('center')
+        bv = data.get_field_parameter("bulk_velocity")
+        pos = spos
+        pos = np.array([data[ptype, pos % ax] for ax in "xyz"])
+        theta = get_sph_theta(pos, center)
+        phi = get_sph_phi(pos, center)
+        pos = pos - np.reshape(center, (3, 1))
+        spht = get_sph_theta_component(pos, theta, phi, normal)
+        return spht
+
+    registry.add_field((ptype, "ParticleThetaSpherical"),
+              function=_ParticleThetaSpherical,
+              particle_type=True, units=r"\rm{cm}",
+              validators=[ValidateParameter("normal"), 
+                          ValidateParameter("center")])
+
+    def _ParticlePhiSpherical(field, data):
+        normal = data.get_field_parameter('normal')
+        center = data.get_field_parameter('center')
+        bv = data.get_field_parameter("bulk_velocity")
+        pos = spos
+        pos = np.array([data[ptype, pos % ax] for ax in "xyz"])
+        theta = get_sph_theta(pos, center)
+        phi = get_sph_phi(pos, center)
+        pos = pos - np.reshape(center, (3, 1))
+        sphp = get_sph_phi_component(pos, phi, normal)
+        return sphp
+
+    registry.add_field((ptype, "ParticlePhiSpherical"),
+              function=_ParticlePhiSpherical,
+              particle_type=True, units=r"\rm{cm}",
+              validators=[ValidateParameter("normal"), 
+                          ValidateParameter("center")])
+
+    def _ParticleRadialVelocity(field, data):
+        normal = data.get_field_parameter('normal')
+        center = data.get_field_parameter('center')
+        bv = data.get_field_parameter("bulk_velocity")
+        pos = spos
+        pos = np.array([data[ptype, pos % ax] for ax in "xyz"])
+        vel = svel
+        vel = np.array([data[ptype, vel % ax] for ax in "xyz"])
+        theta = get_sph_theta(pos, center)
+        phi = get_sph_phi(pos, center)
+        pos = pos - np.reshape(center, (3, 1))
+        vel = vel - np.reshape(bv, (3, 1))
+        sphr = get_sph_r_component(vel, theta, phi, normal)
+        return sphr
+
+    registry.add_field((ptype, "ParticleRadialVelocity"),
+              function=_ParticleRadialVelocity,
+              particle_type=True, units=r"\rm{cm}/\rm{s}",
+              validators=[ValidateParameter("normal"), 
+                          ValidateParameter("center")])
+
+    def _ParticleThetaVelocity(field, data):
+        normal = data.get_field_parameter('normal')
+        center = data.get_field_parameter('center')
+        bv = data.get_field_parameter("bulk_velocity")
+        pos = spos
+        pos = np.array([data[ptype, pos % ax] for ax in "xyz"])
+        vel = svel
+        vel = np.array([data[ptype, vel % ax] for ax in "xyz"])
+        theta = get_sph_theta(pos, center)
+        phi = get_sph_phi(pos, center)
+        pos = pos - np.reshape(center, (3, 1))
+        vel = vel - np.reshape(bv, (3, 1))
+        spht = get_sph_theta_component(vel, theta, phi, normal)
+        return spht
+
+    registry.add_field((ptype, "ParticleThetaVelocity"),
+              function=_ParticleThetaVelocity,
+              particle_type=True, units=r"\rm{cm}/\rm{s}",
+              validators=[ValidateParameter("normal"), 
+                          ValidateParameter("center")])
+
+    def _ParticlePhiVelocity(field, data):
+        normal = data.get_field_parameter('normal')
+        center = data.get_field_parameter('center')
+        bv = data.get_field_parameter("bulk_velocity")
+        pos = np.array([data[ptype, spos % ax] for ax in "xyz"])
+        vel = np.array([data[ptype, svel % ax] for ax in "xyz"])
+        theta = get_sph_theta(pos, center)
+        phi = get_sph_phi(pos, center)
+        pos = pos - np.reshape(center, (3, 1))
+        vel = vel - np.reshape(bv, (3, 1))
+        sphp = get_sph_phi_component(vel, phi, normal)
+        return sphp
+
+    registry.add_field((ptype, "ParticlePhiVelocity"),
+              function=_ParticlePhiVelocity,
+              particle_type=True, units=r"\rm{cm}/\rm{s}",
+              validators=[ValidateParameter("normal"), 
+                          ValidateParameter("center")])
+
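The closure pattern above is the point of this changeset: each field function captures ptype, spos, and svel from the enclosing standard_particle_fields call, so one set of definitions can be registered for any particle type instead of being duplicated with hard-coded field names. A stripped-down sketch of the pattern (the registry dict and make_standard_fields name below are stand-ins, not the yt API):

    import numpy as np

    def make_standard_fields(registry, ptype, svel="particle_velocity_%s"):
        # The inner function closes over ptype and svel; registering it
        # binds the definition to that particle type with no global state.
        def _velocity_magnitude(field, data):
            return np.sqrt(sum(data[ptype, svel % ax]**2 for ax in "xyz"))
        registry[ptype, "VelocityMagnitude"] = _velocity_magnitude

    registry = {}
    make_standard_fields(registry, "io")
    assert ("io", "VelocityMagnitude") in registry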

diff -r 785be13d2f5a3c4e54bd6866c949e901068cffee -r 949e4792c5776a0ddfa00ffd722f9ca366375a4c yt/fields/universal_fields.py
--- a/yt/fields/universal_fields.py
+++ b/yt/fields/universal_fields.py
@@ -49,6 +49,11 @@
      speed_of_light_cgs, \
      km_per_cm, keV_per_K
 
+def _get_conv(unit):
+    def _conv(data):
+        return data.convert(unit)
+    return _conv
+
 from yt.utilities.math_utils import \
     get_sph_r_component, \
     get_sph_theta_component, \
@@ -133,18 +138,6 @@
           convert_function=_convertCourantTimeStep,
           units=r"$\rm{s}$")
 
-def _ParticleVelocityMagnitude(field, data):
-    """M{|v|}"""
-    bulk_velocity = data.get_field_parameter("bulk_velocity")
-    if bulk_velocity == None:
-        bulk_velocity = np.zeros(3)
-    return ( (data["particle_velocity_x"]-bulk_velocity[0])**2.0 + \
-             (data["particle_velocity_y"]-bulk_velocity[1])**2.0 + \
-             (data["particle_velocity_z"]-bulk_velocity[2])**2.0 )**(1.0/2.0)
-add_field("ParticleVelocityMagnitude", function=_ParticleVelocityMagnitude,
-          particle_type=True, 
-          take_log=False, units=r"\rm{cm}/\rm{s}")
-
 def _VelocityMagnitude(field, data):
     """M{|v|}"""
     velocities = obtain_rv_vec(data)
@@ -598,11 +591,6 @@
 def obtain_velocities(data):
     return obtain_rv_vec(data)
 
-def _convertSpecificAngularMomentum(data):
-    return data.convert("cm")
-def _convertSpecificAngularMomentumKMSMPC(data):
-    return km_per_cm*data.convert("mpc")
-
 def _SpecificAngularMomentumX(field, data):
     xv, yv, zv = obtain_velocities(data)
     rv = obtain_rvec(data)
@@ -618,7 +606,7 @@
 for ax in 'XYZ':
     n = "SpecificAngularMomentum%s" % ax
     add_field(n, function=eval("_%s" % n),
-              convert_function=_convertSpecificAngularMomentum,
+              convert_function=_get_conv("cm"),
               units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
 
 def _AngularMomentumX(field, data):
@@ -637,114 +625,6 @@
          units=r"\rm{g}\/\rm{cm}^2/\rm{s}", vector_field=False,
          validators=[ValidateParameter('center')])
 
-def _ParticleSpecificAngularMomentum(field, data):
-    """
-    Calculate the specific angular momentum of a particle.  Returns a
-    vector for each particle.
-    """
-    if data.has_field_parameter("bulk_velocity"):
-        bv = data.get_field_parameter("bulk_velocity")
-    else: bv = np.zeros(3, dtype='float64')
-    xv = data["particle_velocity_x"] - bv[0]
-    yv = data["particle_velocity_y"] - bv[1]
-    zv = data["particle_velocity_z"] - bv[2]
-    center = data.get_field_parameter('center')
-    coords = np.array([data['particle_position_x'],
-                       data['particle_position_y'],
-                       data['particle_position_z']], dtype='float64')
-    new_shape = tuple([3] + [1]*(len(coords.shape)-1))
-    r_vec = coords - np.reshape(center,new_shape)
-    v_vec = np.array([xv,yv,zv], dtype='float64')
-    return np.cross(r_vec, v_vec, axis=0)
-#add_field("ParticleSpecificAngularMomentum",
-#          function=_ParticleSpecificAngularMomentum, particle_type=True,
-#          convert_function=_convertSpecificAngularMomentum, vector_field=True,
-#          units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter('center')])
-#add_field("ParticleSpecificAngularMomentumKMSMPC",
-#          function=_ParticleSpecificAngularMomentum, particle_type=True,
-#          convert_function=_convertSpecificAngularMomentumKMSMPC, vector_field=True,
-#          units=r"\rm{km}\rm{Mpc}/\rm{s}", validators=[ValidateParameter('center')])
-
-def _ParticleSpecificAngularMomentumX(field, data):
-    if data.has_field_parameter("bulk_velocity"):
-        bv = data.get_field_parameter("bulk_velocity")
-    else: bv = np.zeros(3, dtype='float64')
-    center = data.get_field_parameter('center')
-    y = data["particle_position_y"] - center[1]
-    z = data["particle_position_z"] - center[2]
-    yv = data["particle_velocity_y"] - bv[1]
-    zv = data["particle_velocity_z"] - bv[2]
-    return yv*z - zv*y
-add_field("ParticleSpecificAngularMomentumX", 
-          function=_ParticleSpecificAngularMomentumX, particle_type=True,
-          convert_function=_convertSpecificAngularMomentum,
-          units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
-add_field("ParticleSpecificAngularMomentumX_KMSMPC", function=_ParticleSpecificAngularMomentumX, particle_type=True,
-          convert_function=_convertSpecificAngularMomentumKMSMPC,
-          units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
-def _ParticleSpecificAngularMomentumY(field, data):
-    if data.has_field_parameter("bulk_velocity"):
-        bv = data.get_field_parameter("bulk_velocity")
-    else: bv = np.zeros(3, dtype='float64')
-    center = data.get_field_parameter('center')
-    x = data["particle_position_x"] - center[0]
-    z = data["particle_position_z"] - center[2]
-    xv = data["particle_velocity_x"] - bv[0]
-    zv = data["particle_velocity_z"] - bv[2]
-    return -(xv*z - zv*x)
-add_field("ParticleSpecificAngularMomentumY", 
-          function=_ParticleSpecificAngularMomentumY, particle_type=True,
-          convert_function=_convertSpecificAngularMomentum,
-          units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
-add_field("ParticleSpecificAngularMomentumY_KMSMPC", function=_ParticleSpecificAngularMomentumY, particle_type=True,
-          convert_function=_convertSpecificAngularMomentumKMSMPC,
-          units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
-def _ParticleSpecificAngularMomentumZ(field, data):
-    if data.has_field_parameter("bulk_velocity"):
-        bv = data.get_field_parameter("bulk_velocity")
-    else: bv = np.zeros(3, dtype='float64')
-    center = data.get_field_parameter('center')
-    x = data["particle_position_x"] - center[0]
-    y = data["particle_position_y"] - center[1]
-    xv = data["particle_velocity_x"] - bv[0]
-    yv = data["particle_velocity_y"] - bv[1]
-    return xv*y - yv*x
-add_field("ParticleSpecificAngularMomentumZ", 
-          function=_ParticleSpecificAngularMomentumZ, particle_type=True,
-          convert_function=_convertSpecificAngularMomentum,
-          units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
-add_field("ParticleSpecificAngularMomentumZ_KMSMPC", function=_ParticleSpecificAngularMomentumZ, particle_type=True,
-          convert_function=_convertSpecificAngularMomentumKMSMPC,
-          units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
-
-def _ParticleAngularMomentum(field, data):
-    return data["ParticleMass"] * data["ParticleSpecificAngularMomentum"]
-#add_field("ParticleAngularMomentum",
-#          function=_ParticleAngularMomentum, units=r"\rm{g}\/\rm{cm}^2/\rm{s}",
-#          particle_type=True, validators=[ValidateParameter('center')])
-def _ParticleAngularMomentumMSUNKMSMPC(field, data):
-    return data["ParticleMass"] * data["ParticleSpecificAngularMomentumKMSMPC"]
-#add_field("ParticleAngularMomentumMSUNKMSMPC",
-#          function=_ParticleAngularMomentumMSUNKMSMPC,
-#          units=r"M_{\odot}\rm{km}\rm{Mpc}/\rm{s}",
-#          particle_type=True, validators=[ValidateParameter('center')])
-
-def _ParticleAngularMomentumX(field, data):
-    return data["particle_mass"] * data["ParticleSpecificAngularMomentumX"]
-add_field("ParticleAngularMomentumX", function=_ParticleAngularMomentumX,
-         units=r"\rm{g}\/\rm{cm}^2/\rm{s}", particle_type=True,
-         validators=[ValidateParameter('center')])
-def _ParticleAngularMomentumY(field, data):
-    return data["particle_mass"] * data["ParticleSpecificAngularMomentumY"]
-add_field("ParticleAngularMomentumY", function=_ParticleAngularMomentumY,
-         units=r"\rm{g}\/\rm{cm}^2/\rm{s}", particle_type=True,
-         validators=[ValidateParameter('center')])
-def _ParticleAngularMomentumZ(field, data):
-    return data["particle_mass"] * data["ParticleSpecificAngularMomentumZ"]
-add_field("ParticleAngularMomentumZ", function=_ParticleAngularMomentumZ,
-         units=r"\rm{g}\/\rm{cm}^2/\rm{s}", particle_type=True,
-         validators=[ValidateParameter('center')])
-
 def get_radius(data, field_prefix):
     center = data.get_field_parameter("center")
     DW = data.pf.domain_right_edge - data.pf.domain_left_edge
@@ -766,86 +646,34 @@
     np.sqrt(radius, radius)
     return radius
 
-def _ParticleRadius(field, data):
-    return get_radius(data, "particle_position_")
 def _Radius(field, data):
     return get_radius(data, "")
 
-def _ConvertRadiusCGS(data):
-    return data.convert("cm")
-add_field("ParticleRadius", function=_ParticleRadius,
-          validators=[ValidateParameter("center")],
-          convert_function = _ConvertRadiusCGS, units=r"\rm{cm}",
-          particle_type = True,
-          display_name = "Particle Radius")
-add_field("Radius", function=_Radius,
-          validators=[ValidateParameter("center")],
-          convert_function = _ConvertRadiusCGS, units=r"\rm{cm}")
-
-def _ConvertRadiusMpc(data):
-    return data.convert("mpc")
 add_field("RadiusMpc", function=_Radius,
           validators=[ValidateParameter("center")],
-          convert_function = _ConvertRadiusMpc, units=r"\rm{Mpc}",
+          convert_function = _get_conv("mpc"), units=r"\rm{Mpc}",
           display_name = "Radius")
-add_field("ParticleRadiusMpc", function=_ParticleRadius,
-          validators=[ValidateParameter("center")],
-          convert_function = _ConvertRadiusMpc, units=r"\rm{Mpc}",
-          particle_type=True,
-          display_name = "Particle Radius")
 
-def _ConvertRadiuskpc(data):
-    return data.convert("kpc")
-add_field("ParticleRadiuskpc", function=_ParticleRadius,
-          validators=[ValidateParameter("center")],
-          convert_function = _ConvertRadiuskpc, units=r"\rm{kpc}",
-          particle_type=True,
-          display_name = "Particle Radius")
 add_field("Radiuskpc", function=_Radius,
           validators=[ValidateParameter("center")],
-          convert_function = _ConvertRadiuskpc, units=r"\rm{kpc}",
+          convert_function = _get_conv("kpc"), units=r"\rm{kpc}",
           display_name = "Radius")
 
-def _ConvertRadiuskpch(data):
-    return data.convert("kpch")
-add_field("ParticleRadiuskpch", function=_ParticleRadius,
-          validators=[ValidateParameter("center")],
-          convert_function = _ConvertRadiuskpch, units=r"\rm{kpc}/\rm{h}",
-          particle_type=True,
-          display_name = "Particle Radius")
 add_field("Radiuskpch", function=_Radius,
           validators=[ValidateParameter("center")],
-          convert_function = _ConvertRadiuskpc, units=r"\rm{kpc}/\rm{h}",
+          convert_function = _get_conv("kpch"), units=r"\rm{kpc}/\rm{h}",
           display_name = "Radius")
 
-def _ConvertRadiuspc(data):
-    return data.convert("pc")
-add_field("ParticleRadiuspc", function=_ParticleRadius,
-          validators=[ValidateParameter("center")],
-          convert_function = _ConvertRadiuspc, units=r"\rm{pc}",
-          particle_type=True,
-          display_name = "Particle Radius")
 add_field("Radiuspc", function=_Radius,
           validators=[ValidateParameter("center")],
-          convert_function = _ConvertRadiuspc, units=r"\rm{pc}",
+          convert_function = _get_conv("pc"), units=r"\rm{pc}",
           display_name="Radius")
 
-def _ConvertRadiusAU(data):
-    return data.convert("au")
-add_field("ParticleRadiusAU", function=_ParticleRadius,
-          validators=[ValidateParameter("center")],
-          convert_function = _ConvertRadiusAU, units=r"\rm{AU}",
-          particle_type=True,
-          display_name = "Particle Radius")
 add_field("RadiusAU", function=_Radius,
           validators=[ValidateParameter("center")],
-          convert_function = _ConvertRadiusAU, units=r"\rm{AU}",
+          convert_function = _get_conv("au"), units=r"\rm{AU}",
           display_name = "Radius")
 
-add_field("ParticleRadiusCode", function=_ParticleRadius,
-          validators=[ValidateParameter("center")],
-          particle_type=True,
-          display_name = "Particle Radius (code)")
 add_field("RadiusCode", function=_Radius,
           validators=[ValidateParameter("center")],
           display_name = "Radius (code)")
@@ -871,118 +699,6 @@
 add_field("RadialVelocityKMSABS", function=_RadialVelocityABS,
           convert_function=_ConvertRadialVelocityKMS, units=r"\rm{km}/\rm{s}")
 
-def _ParticleRadiusSpherical(field, data):
-    normal = data.get_field_parameter('normal')
-    center = data.get_field_parameter('center')
-    bv = data.get_field_parameter("bulk_velocity")
-    pos = "particle_position_%s"
-    pos = np.array([data[pos % ax] for ax in "xyz"])
-    theta = get_sph_theta(pos, center)
-    phi = get_sph_phi(pos, center)
-    pos = pos - np.reshape(center, (3, 1))
-    sphr = get_sph_r_component(pos, theta, phi, normal)
-    return sphr
-
-add_field("ParticleRadiusSpherical", function=_ParticleRadiusSpherical,
-          particle_type=True, units=r"\rm{cm}/\rm{s}",
-          validators=[ValidateParameter("normal"), 
-                      ValidateParameter("center")])
-
-def _ParticleThetaSpherical(field, data):
-    normal = data.get_field_parameter('normal')
-    center = data.get_field_parameter('center')
-    bv = data.get_field_parameter("bulk_velocity")
-    pos = "particle_position_%s"
-    pos = np.array([data[pos % ax] for ax in "xyz"])
-    theta = get_sph_theta(pos, center)
-    phi = get_sph_phi(pos, center)
-    pos = pos - np.reshape(center, (3, 1))
-    spht = get_sph_theta_component(pos, theta, phi, normal)
-    return spht
-
-add_field("ParticleThetaSpherical", function=_ParticleThetaSpherical,
-          particle_type=True, units=r"\rm{cm}/\rm{s}",
-          validators=[ValidateParameter("normal"), 
-                      ValidateParameter("center")])
-
-def _ParticlePhiSpherical(field, data):
-    normal = data.get_field_parameter('normal')
-    center = data.get_field_parameter('center')
-    bv = data.get_field_parameter("bulk_velocity")
-    pos = "particle_position_%s"
-    pos = np.array([data[pos % ax] for ax in "xyz"])
-    theta = get_sph_theta(pos, center)
-    phi = get_sph_phi(pos, center)
-    pos = pos - np.reshape(center, (3, 1))
-    vel = vel - np.reshape(bv, (3, 1))
-    sphp = get_sph_phi_component(pos, theta, phi, normal)
-    return sphp
-
-add_field("ParticlePhiSpherical", function=_ParticleThetaSpherical,
-          particle_type=True, units=r"\rm{cm}/\rm{s}",
-          validators=[ValidateParameter("normal"), 
-                      ValidateParameter("center")])
-
-def _ParticleRadialVelocity(field, data):
-    normal = data.get_field_parameter('normal')
-    center = data.get_field_parameter('center')
-    bv = data.get_field_parameter("bulk_velocity")
-    pos = "particle_position_%s"
-    pos = np.array([data[pos % ax] for ax in "xyz"])
-    vel = "particle_velocity_%s"
-    vel = np.array([data[vel % ax] for ax in "xyz"])
-    theta = get_sph_theta(pos, center)
-    phi = get_sph_phi(pos, center)
-    pos = pos - np.reshape(center, (3, 1))
-    vel = vel - np.reshape(bv, (3, 1))
-    sphr = get_sph_r_component(vel, theta, phi, normal)
-    return sphr
-
-add_field("ParticleRadialVelocity", function=_ParticleRadialVelocity,
-          particle_type=True, units=r"\rm{cm}/\rm{s}",
-          validators=[ValidateParameter("normal"), 
-                      ValidateParameter("center")])
-
-def _ParticleThetaVelocity(field, data):
-    normal = data.get_field_parameter('normal')
-    center = data.get_field_parameter('center')
-    bv = data.get_field_parameter("bulk_velocity")
-    pos = "particle_position_%s"
-    pos = np.array([data[pos % ax] for ax in "xyz"])
-    vel = "particle_velocity_%s"
-    vel = np.array([data[vel % ax] for ax in "xyz"])
-    theta = get_sph_theta(pos, center)
-    phi = get_sph_phi(pos, center)
-    pos = pos - np.reshape(center, (3, 1))
-    vel = vel - np.reshape(bv, (3, 1))
-    spht = get_sph_theta_component(vel, theta, phi, normal)
-    return spht
-
-add_field("ParticleThetaVelocity", function=_ParticleThetaVelocity,
-          particle_type=True, units=r"\rm{cm}/\rm{s}",
-          validators=[ValidateParameter("normal"), 
-                      ValidateParameter("center")])
-
-def _ParticlePhiVelocity(field, data):
-    normal = data.get_field_parameter('normal')
-    center = data.get_field_parameter('center')
-    bv = data.get_field_parameter("bulk_velocity")
-    pos = "particle_position_%s"
-    pos = np.array([data[pos % ax] for ax in "xyz"])
-    vel = "particle_velocity_%s"
-    vel = np.array([data[vel % ax] for ax in "xyz"])
-    theta = get_sph_theta(pos, center)
-    phi = get_sph_phi(pos, center)
-    pos = pos - np.reshape(center, (3, 1))
-    vel = vel - np.reshape(bv, (3, 1))
-    sphp = get_sph_phi_component(vel, phi, normal)
-    return sphp
-
-add_field("ParticlePhiVelocity", function=_ParticleThetaVelocity,
-          particle_type=True, units=r"\rm{cm}/\rm{s}",
-          validators=[ValidateParameter("normal"), 
-                      ValidateParameter("center")])
-
 def _TangentialVelocity(field, data):
     return np.sqrt(data["VelocityMagnitude"]**2.0
                  - data["RadialVelocity"]**2.0)

diff -r 785be13d2f5a3c4e54bd6866c949e901068cffee -r 949e4792c5776a0ddfa00ffd722f9ca366375a4c yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -28,7 +28,8 @@
 import yt.fields.universal_fields
 from yt.fields.particle_fields import \
     particle_deposition_functions, \
-    particle_vector_functions
+    particle_vector_functions, \
+    standard_particle_fields
 from yt.utilities.physical_constants import \
     mh, \
     mass_sun_cgs
@@ -573,3 +574,5 @@
 
     registry.add_field((ptype, "particle_mass"), function=NullFunc, 
               particle_type=True, convert_function = _convertParticleMass)
+
+    standard_particle_fields(registry, ptype)


https://bitbucket.org/yt_analysis/yt-3.0/commits/88b546f8ddc0/
Changeset:   88b546f8ddc0
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-10 21:40:38
Summary:     Skip popping fields in particle_unions.
Affected #:  2 files

diff -r 949e4792c5776a0ddfa00ffd722f9ca366375a4c -r 88b546f8ddc0da3f6a52c8e2b1f44be4334e385d yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -340,7 +340,8 @@
         # Give ourselves a chance to add them here, first, then...
         # ...if we can't find them, we set them up as defaults.
         self.h._setup_particle_types([union.name])
-        self.h._setup_unknown_fields(fields, self.field_info)
+        self.h._setup_unknown_fields(fields, self.field_info,
+                                     skip_removal = True)
 
     def _setup_particle_type(self, ptype):
         mylog.debug("Don't know what to do with %s", ptype)

diff -r 949e4792c5776a0ddfa00ffd722f9ca366375a4c -r 88b546f8ddc0da3f6a52c8e2b1f44be4334e385d yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -176,9 +176,8 @@
             df += self.pf._setup_particle_type(ptype)
         self._derived_fields_add(df)
 
-    def _setup_unknown_fields(self, list_of_fields = None,
-                              field_info = None,
-                              skip_particles = False):
+    def _setup_unknown_fields(self, list_of_fields = None, field_info = None,
+                              skip_removal = False):
         field_info = field_info or self.pf._fieldinfo_known
         field_list = list_of_fields or self.field_list
         dftype = self.parameter_file.default_fluid_type
@@ -187,14 +186,14 @@
             # By allowing a backup, we don't mandate that it's found in our
             # current field info.  This means we'll instead simply override
             # it.
-            ff = self.parameter_file.field_info.pop(field, None)
+            if not skip_removal:
+                ff = self.parameter_file.field_info.pop(field, None)
             if field not in field_info:
                 # Now we check if it's a gas field or what ...
                 if isinstance(field, tuple) and field[0] in self.pf.particle_types:
                     particle_type = True
                 else:
                     particle_type = False
-                if particle_type and skip_particles: continue
                 if isinstance(field, tuple) and not particle_type and \
                    field[1] in field_info:
                     mylog.debug("Adding known field %s to list of fields", field)


https://bitbucket.org/yt_analysis/yt-3.0/commits/4e460a5ea4a8/
Changeset:   4e460a5ea4a8
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-10 21:43:19
Summary:     Don't need to re-add Mass.
Affected #:  1 file

diff -r 88b546f8ddc0da3f6a52c8e2b1f44be4334e385d -r 4e460a5ea4a86c69e38f3a9be3214ad6ac51e692 yt/frontends/sph/fields.py
--- a/yt/frontends/sph/fields.py
+++ b/yt/frontends/sph/fields.py
@@ -84,9 +84,7 @@
                                   registry)
     particle_scalar_functions(ptype, "Coordinates", "Velocities",
                               registry)
-
-    for fname in ["Coordinates", "Velocities", "ParticleIDs", "Mass",
-                  "Epsilon", "Phi"]:
+    for fname in ["Coordinates", "ParticleIDs", "Epsilon", "Phi"]:
         registry.add_field((ptype, fname), function=NullFunc,
                 particle_type = True)
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/81d87de58877/
Changeset:   81d87de58877
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-10 21:50:28
Summary:     Adding a standard_particle_fields call to all the frontends.
Affected #:  4 files

diff -r 4e460a5ea4a86c69e38f3a9be3214ad6ac51e692 -r 81d87de58877958a2bb9bdb2ed76ad6e303a4b3e yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -30,6 +30,8 @@
     KnownARTIOFields, \
     b2t, \
     _setup_particle_fields
+from yt.fields.particle_fields import \
+    standard_particle_fields
 
 
 from yt.funcs import *
@@ -523,6 +525,7 @@
     def _setup_particle_type(self, ptype):
         orig = set(self.field_info.items())
         _setup_particle_fields(self.field_info, ptype)
+        standard_particle_fields(self.field_info, ptype)
         return [n for n, v in set(self.field_info.items()).difference(orig)]
 
     @classmethod

diff -r 4e460a5ea4a86c69e38f3a9be3214ad6ac51e692 -r 81d87de58877958a2bb9bdb2ed76ad6e303a4b3e yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -45,6 +45,8 @@
     KnownRAMSESFields, \
     create_cooling_fields, \
     _setup_particle_fields
+from yt.fields.particle_fields import \
+    standard_particle_fields
 
 class RAMSESDomainFile(object):
     _last_mask = None
@@ -511,6 +513,7 @@
     def _setup_particle_type(self, ptype):
         orig = set(self.field_info.items())
         _setup_particle_fields(self.field_info, ptype)
+        standard_particle_fields(self.field_info, ptype)
         return [n for n, v in set(self.field_info.items()).difference(orig)]
 
     @classmethod

diff -r 4e460a5ea4a86c69e38f3a9be3214ad6ac51e692 -r 81d87de58877958a2bb9bdb2ed76ad6e303a4b3e yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -47,7 +47,8 @@
     NullFunc, \
     TranslationFunc
 from yt.fields.particle_fields import \
-    particle_deposition_functions
+    particle_deposition_functions, \
+    standard_particle_fields
 
 
 class ParticleFile(object):
@@ -126,6 +127,7 @@
         self.field_info.add_field((ptype, "particle_index"),
             function = TranslationFunc((ptype, "ParticleIDs")),
             particle_type = True)
+        standard_particle_fields(self.field_info, ptype)
         _setup_particle_fields(self.field_info, ptype)
         return [n for n, v in set(self.field_info.items()).difference(orig)]
 

diff -r 4e460a5ea4a86c69e38f3a9be3214ad6ac51e692 -r 81d87de58877958a2bb9bdb2ed76ad6e303a4b3e yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -751,13 +751,15 @@
     over_refine_factor = 1
 
     def _setup_particle_type(self, ptype):
-        df = particle_vector_functions(ptype,
+        orig = set(self.field_info.items())
+        particle_vector_functions(ptype,
             ["particle_position_%s" % ax for ax in 'xyz'],
             ["particle_velocity_%s" % ax for ax in 'xyz'],
             self.field_info)
-        df += particle_deposition_functions(ptype,
+        particle_deposition_functions(ptype,
             "Coordinates", "particle_mass", self.field_info)
-        return df
+        standard_particle_fields(self.field_info, ptype)
+        return [n for n, v in set(self.field_info.items()).difference(orig)]
 
 def load_particles(data, sim_unit_to_cm, bbox=None,
                       sim_time=0.0, periodicity=(True, True, True),
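Each frontend's _setup_particle_type now reports the fields it registered by diffing snapshots of the registry, which is what the orig = set(...) / set(...).difference(orig) idiom above computes. In miniature:

    registry = {"existing": 1}
    orig = set(registry.items())
    registry["new_field"] = 2     # what standard_particle_fields would add
    added = [n for n, v in set(registry.items()).difference(orig)]
    assert added == ["new_field"]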


https://bitbucket.org/yt_analysis/yt-3.0/commits/83ebd1584b7c/
Changeset:   83ebd1584b7c
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-10 22:37:53
Summary:     Fixing ARTIO sfc_start problem in particle IO.
Affected #:  3 files

diff -r 81d87de58877958a2bb9bdb2ed76ad6e303a4b3e -r 83ebd1584b7c8cae41ad4386f5cc0c7f86623c36 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -35,6 +35,18 @@
     get_radius, \
     _get_conv
 
+from yt.utilities.math_utils import \
+    get_sph_r_component, \
+    get_sph_theta_component, \
+    get_sph_phi_component, \
+    get_cyl_r_component, \
+    get_cyl_z_component, \
+    get_cyl_theta_component, \
+    get_cyl_r, get_cyl_theta, \
+    get_cyl_z, get_sph_r, \
+    get_sph_theta, get_sph_phi, \
+    periodic_dist, euclidean_dist
+     
 def _field_concat(fname):
     def _AllFields(field, data):
         v = []

diff -r 81d87de58877958a2bb9bdb2ed76ad6e303a4b3e -r 83ebd1584b7c8cae41ad4386f5cc0c7f86623c36 yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -729,9 +729,13 @@
     cdef public artio_fileset artio_handle
     cdef ARTIOSFCRangeHandler range_handler
     cdef np.int64_t level_indices[32]
+    cdef np.int64_t sfc_start
+    cdef np.int64_t sfc_end
 
     def __init__(self, ARTIOSFCRangeHandler range_handler):
         self.range_handler = range_handler
+        self.sfc_start = range_handler.sfc_start
+        self.sfc_end = range_handler.sfc_end
         self.artio_handle = range_handler.artio_handle
         # Note the final argument is partial_coverage, which indicates whether
         # or not an Oct can be partially refined.
@@ -897,11 +901,9 @@
 
     def fill_sfc_particles(self, fields):
         # This handles not getting particles for refined sfc values.
-        cdef np.int64_t sfc_start, sfc_end
-        sfc_start = self.domains[0].con_id
-        sfc_end = self.domains[self.num_domains - 1].con_id
-        rv = read_sfc_particles(self.artio_handle, sfc_start, sfc_end,
-                                0, fields, self.range_handler.doct_count,
+        rv = read_sfc_particles(self.artio_handle, self.sfc_start,
+                                self.sfc_end, 0, fields,
+                                self.range_handler.doct_count,
                                 self.range_handler.pcount)
         return rv
 
@@ -982,14 +984,7 @@
             "species_%02u_secondary_variable_labels" % (species,), [])
         tp = total_particles[species]
         vp = &vpoints[species]
-        if field == "MASS" and params["particle_species_mass"][species] != 0.0:
-            vp.n_mass = 1
-            data[(species, field)] = np.zeros(tp, dtype="float64")
-            npf64arr = data[(species, field)]
-            # We fill this *now*
-            npf64arr += params["particle_species_mass"][species]
-            vp.mass = <np.float64_t*> npf64arr.data
-        elif field == "PID":
+        if field == "PID":
             vp.n_pid = 1
             data[(species, field)] = np.zeros(tp, dtype="int64")
             npi64arr = data[(species, field)]
@@ -1013,6 +1008,13 @@
             vp.s_ind[vp.n_s] = sec_vars.index(field)
             vp.svars[vp.n_s] = <np.float64_t *> npf64arr.data
             vp.n_s += 1
+        elif field == "MASS":
+            vp.n_mass = 1
+            data[(species, field)] = np.zeros(tp, dtype="float64")
+            npf64arr = data[(species, field)]
+            # We fill this *now*
+            npf64arr += params["particle_species_mass"][species]
+            vp.mass = <np.float64_t*> npf64arr.data
 
     status = artio_particle_cache_sfc_range( handle,
             sfc_start, sfc_end ) 
@@ -1043,7 +1045,7 @@
                         secondary_variables)
                 check_artio_status(status)
                 ind = vp.count
-
+                
                 for i in range(vp.n_p):
                     vind = vp.p_ind[i]
                     vp.pvars[i][ind] = primary_variables[vind]

diff -r 81d87de58877958a2bb9bdb2ed76ad6e303a4b3e -r 83ebd1584b7c8cae41ad4386f5cc0c7f86623c36 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -33,7 +33,6 @@
 from yt.fields.particle_fields import \
     standard_particle_fields
 
-
 from yt.funcs import *
 from yt.geometry.geometry_handler import \
     GeometryHandler, YTDataChunk
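The fix captures sfc_start and sfc_end on the reader at construction time, straight from the range handler, rather than re-deriving them later from the con_id endpoints of self.domains, which did not reliably span the handler's range. Reordering the MASS branch to the end of the if/elif chain likewise lets a per-particle mass variable from the file take precedence over the constant species mass when both are present. Schematically, in plain Python rather than the original Cython:

    class SFCParticleReader:
        # illustrative names; the real class is a cdef Cython type
        def __init__(self, range_handler):
            self.sfc_start = range_handler.sfc_start   # captured once
            self.sfc_end = range_handler.sfc_end

        def fill_sfc_particles(self, read_fn, fields):
            # always reads over the cached range, not self.domains
            return read_fn(self.sfc_start, self.sfc_end, fields)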


https://bitbucket.org/yt_analysis/yt-3.0/commits/8745ab2be619/
Changeset:   8745ab2be619
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-10 22:44:29
Summary:     Fixing up a missing Radius field.
Affected #:  1 file

diff -r 83ebd1584b7c8cae41ad4386f5cc0c7f86623c36 -r 8745ab2be619bf0dc496d8a78ad306bac85b3ca1 yt/fields/universal_fields.py
--- a/yt/fields/universal_fields.py
+++ b/yt/fields/universal_fields.py
@@ -649,6 +649,10 @@
 def _Radius(field, data):
     return get_radius(data, "")
 
+add_field("Radius", function=_Radius,
+          validators=[ValidateParameter("center")],
+          convert_function = _get_conv("cm"), units=r"\rm{cm}")
+
 add_field("RadiusMpc", function=_Radius,
           validators=[ValidateParameter("center")],
           convert_function = _get_conv("mpc"), units=r"\rm{Mpc}",


https://bitbucket.org/yt_analysis/yt-3.0/commits/193d820bd49d/
Changeset:   193d820bd49d
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-10 22:56:54
Summary:     Missing import, and explicitly say gas Density.
Affected #:  2 files

diff -r 8745ab2be619bf0dc496d8a78ad306bac85b3ca1 -r 193d820bd49d88650d95a587bd9e909bc19aefb4 yt/fields/universal_fields.py
--- a/yt/fields/universal_fields.py
+++ b/yt/fields/universal_fields.py
@@ -375,7 +375,8 @@
           convert_function=_convertCellMassCode)
 
 def _TotalMass(field,data):
-    return (data["Density"]+data[("deposit", "particle_density")]) * data["CellVolume"]
+    return (data["gas","Density"]+data[("deposit", "particle_density")]) * \
+            data["CellVolume"]
 add_field("TotalMass", function=_TotalMass, units=r"\rm{g}")
 add_field("TotalMassMsun", units=r"M_{\odot}",
           function=_TotalMass,

diff -r 8745ab2be619bf0dc496d8a78ad306bac85b3ca1 -r 193d820bd49d88650d95a587bd9e909bc19aefb4 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -39,7 +39,8 @@
     ParticleGeometryHandler
 from yt.fields.particle_fields import \
     particle_vector_functions, \
-    particle_deposition_functions
+    particle_deposition_functions, \
+    standard_particle_fields
 from yt.geometry.oct_container import \
     OctreeContainer
 from yt.geometry.unstructured_mesh_handler import \
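Writing data["gas", "Density"] uses the (field type, field name) tuple form, so TotalMass unambiguously adds the fluid density to the deposited particle density instead of whatever a bare "Density" lookup happens to resolve to. The same tuple-keyed access in miniature:

    # toy stand-in for tuple-keyed field access on a data object
    ad = {("gas", "Density"): 1.0, ("deposit", "particle_density"): 0.5}
    total_density = ad["gas", "Density"] + ad["deposit", "particle_density"]
    assert total_density == 1.5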


https://bitbucket.org/yt_analysis/yt-3.0/commits/2cd60317d079/
Changeset:   2cd60317d079
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-10 22:58:16
Summary:     API drift for derived fields.
Affected #:  1 file

diff -r 193d820bd49d88650d95a587bd9e909bc19aefb4 -r 2cd60317d07998bab21e0932f0b30e43dbe3d7fc yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -2029,7 +2029,7 @@
             function=_make_wf(field, weight))
         # Now we have to tell the parameter file to add it and to calculate its
         # dependencies..
-        pf.h._derived_fields_add(["temp_weightfield"], [])
+        pf.h._derived_fields_add(["temp_weightfield"])
         fields = ["temp_weightfield", weight]
     nv = 12*nside**2
     image = np.zeros((nv,1,4), dtype='float64', order='C')
@@ -2120,7 +2120,7 @@
                 function=_make_wf(self.field, self.weight))
             # Now we have to tell the parameter file to add it and to calculate
             # its dependencies..
-            pf.h._derived_fields_add(["temp_weightfield"], [])
+            pf.h._derived_fields_add(["temp_weightfield"])
             fields = ["temp_weightfield", self.weight]
         
         self.fields = fields


https://bitbucket.org/yt_analysis/yt-3.0/commits/696eb34631b8/
Changeset:   696eb34631b8
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-10 23:11:00
Summary:     Why are we re-assigning fd?
Affected #:  1 file

diff -r 2cd60317d07998bab21e0932f0b30e43dbe3d7fc -r 696eb34631b80bc3c34739b0ed6a458a3536531d yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -469,7 +469,6 @@
             fd = self.pf.field_dependencies.get(field, None) or \
                  self.pf.field_dependencies.get(field[1], None)
             if fd is None: continue
-            fd = self.pf.field_dependencies[field]
             requested = self._determine_fields(list(set(fd.requested)))
             deps = [d for d in requested if d not in fields_to_get]
             fields_to_get += deps

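The deleted line defeated the fallback lookup just above it.  A standalone
sketch of why (the dictionary contents are illustrative):

    # Dependencies may be keyed by bare name or by (type, name) tuple.
    field_dependencies = {"Density": "<deps>"}     # only the bare name here
    field = ("gas", "Density")
    fd = field_dependencies.get(field, None) or \
         field_dependencies.get(field[1], None)    # fallback finds "<deps>"
    # The removed line then re-indexed with the tuple key unconditionally:
    # fd = field_dependencies[field]   # KeyError: ('gas', 'Density')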

https://bitbucket.org/yt_analysis/yt-3.0/commits/49dbaa55f42a/
Changeset:   49dbaa55f42a
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-10 23:11:08
Summary:     Creation time for ARTIO needs a conversion.
Affected #:  1 file

diff -r 696eb34631b80bc3c34739b0ed6a458a3536531d -r 49dbaa55f42acbaacf18cf108f7441633ca1eb7c yt/frontends/artio/fields.py
--- a/yt/frontends/artio/fields.py
+++ b/yt/frontends/artio/fields.py
@@ -25,6 +25,8 @@
     ValidateSpatial, \
     ValidateGridType
 import yt.fields.universal_fields
+from yt.fields.universal_fields import \
+    _get_conv
 from yt.fields.particle_fields import \
     particle_deposition_functions, \
     particle_vector_functions, \
@@ -243,6 +245,7 @@
         registry.add_field(
                         (ptype, "particle_velocity_%s" % ax),
                         function=NullFunc,
+                        convert_function=_get_conv("particle_velocity_%s" % ax),
                         particle_type=True)
         registry.add_field(
                         (ptype, "particle_position_%s" % ax),
@@ -259,12 +262,17 @@
 
     registry.add_field((ptype, "particle_mass_initial"),
               function=NullFunc,
-              convert_function=_convertParticleMass,
+              convert_function=_get_conv("particle_mass"),
               units=r"\rm{g}",
               particle_type=True)
 
+    registry.add_field((ptype, "creation_time"),
+            function=NullFunc,
+            convert_function=_get_conv("particle_creation_time"),
+            units=r"\rm{s}",
+            particle_type=True)
+
     for field in ["particle_index",
-                  "creation_time",
                   "particle_species",
                   "particle_metallicity1",
                   "particle_metallicity2"]:

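For reference, the `_get_conv` helper imported above is, roughly, a closure
factory over the data object's `convert` method (paraphrased from
universal_fields, not part of this diff):

    def _get_conv(cf):
        # Build a convert_function that scales raw values by the
        # frontend's registered conversion factor for "cf".
        def _conv(data):
            return data.convert(cf)
        return _conv

So `convert_function=_get_conv("particle_creation_time")` makes the new
creation_time field come out in seconds, provided the ARTIO frontend
registers that conversion factor.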

https://bitbucket.org/yt_analysis/yt-3.0/commits/77f3afedfcc2/
Changeset:   77f3afedfcc2
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-10 23:11:42
Summary:     Merging from mainline dev
Affected #:  50 files

diff -r 49dbaa55f42acbaacf18cf108f7441633ca1eb7c -r 77f3afedfcc2db2b35879f572c36f8d6adf4915b yt/analysis_modules/sunyaev_zeldovich/projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -7,7 +7,7 @@
 For details on the computations involved please refer to the following references:
 
 Chluba, Nagai, Sazonov, Nelson, MNRAS, 2012, arXiv:1205.5778
-Chluba, Switzer, Nagai, Nelson, MNRAS, 2012, arXiv:1211.3206 
+Chluba, Switzer, Nagai, Nelson, MNRAS, 2012, arXiv:1211.3206
 """
 
 #-----------------------------------------------------------------------------
@@ -30,7 +30,7 @@
 import numpy as np
 
 I0 = 2*(kboltz*Tcmb)**3/((hcgs*clight)**2)*1.0e17
-        
+
 try:
     import SZpack
 except:
@@ -78,7 +78,7 @@
     >>> szprj = SZProjection(pf, freqs, high_order=True)
     """
     def __init__(self, pf, freqs, mue=1.143, high_order=False):
-            
+
         self.pf = pf
         self.num_freqs = len(freqs)
         self.high_order = high_order
@@ -99,7 +99,7 @@
         for f, field in zip(self.freqs, self.freq_fields):
             self.units[field] = r"$\mathrm{MJy\ sr^{-1}}$"
             self.display_names[field] = r"$\mathrm{\Delta{I}_{%d\ GHz}}$" % (int(f))
-            
+
     def on_axis(self, axis, center="c", width=(1, "unitary"), nx=800, source=None):
         r""" Make an on-axis projection of the SZ signal.
 
@@ -126,7 +126,7 @@
             axis = data.get_field_parameter("axis")
             vpar = data["Density"]*data["%s-velocity" % (vlist[axis])]
             return vpar/clight
-        add_field("BetaPar", function=_beta_par)    
+        add_field("BetaPar", function=_beta_par)
 
         proj = self.pf.h.proj("Density", axis, data_source=source)
         proj.set_field_parameter("axis", axis)
@@ -137,7 +137,7 @@
         omega1 = frb["TSquared"]/dens/(Te*Te) - 1.
         bperp2 = np.zeros((nx,nx))
         sigma1 = np.zeros((nx,nx))
-        kappa1 = np.zeros((nx,nx))                                    
+        kappa1 = np.zeros((nx,nx))
         if self.high_order:
             bperp2 = frb["BetaPerpSquared"]/dens
             sigma1 = frb["TBetaPar"]/dens/Te - bpar
@@ -149,16 +149,16 @@
         self.dx = (frb.bounds[1]-frb.bounds[0])/nx
         self.dy = (frb.bounds[3]-frb.bounds[2])/ny
         self.nx = nx
-        
+
         self._compute_intensity(tau, Te, bpar, omega1, sigma1, kappa1, bperp2)
-                                                                                                                
+
     def off_axis(self, L, center="c", width=(1, "unitary"), nx=800, source=None):
         r""" Make an off-axis projection of the SZ signal.
-        
+
         Parameters
         ----------
         L : array_like
-            The normal vector of the projection. 
+            The normal vector of the projection.
         center : array_like or string, optional
             The center of the projection.
         width : float or tuple
@@ -168,7 +168,7 @@
         source : yt.data_objects.api.AMRData, optional
             If specified, this will be the data source used for selecting regions to project.
             Currently unsupported in yt 2.x.
-                    
+
         Examples
         --------
         >>> L = np.array([0.5, 1.0, 0.75])
@@ -188,7 +188,7 @@
         if source is not None:
             mylog.error("Source argument is not currently supported for off-axis S-Z projections.")
             raise NotImplementedError
-                
+
         def _beta_par(field, data):
             vpar = data["Density"]*(data["x-velocity"]*L[0]+
                                     data["y-velocity"]*L[1]+
@@ -225,18 +225,18 @@
         # Bad hack, but we get NaNs if we don't do something like this
         small_beta = np.abs(bpar) < 1.0e-20
         bpar[small_beta] = 1.0e-20
-                                                                   
+
         comm = communication_system.communicators[-1]
 
         nx, ny = self.nx,self.nx
         signal = np.zeros((self.num_freqs,nx,ny))
         xo = np.zeros((self.num_freqs))
-        
+
         k = int(0)
 
         start_i = comm.rank*nx/comm.size
         end_i = (comm.rank+1)*nx/comm.size
-                        
+
         pbar = get_pbar("Computing SZ signal.", nx*nx)
 
         for i in xrange(start_i, end_i):
@@ -250,9 +250,9 @@
                 k += 1
 
         signal = comm.mpi_allreduce(signal)
-        
+
         pbar.finish()
-                
+
         for i, field in enumerate(self.freq_fields):
             self.data[field] = ImageArray(I0*self.xinit[i]**3*signal[i,:,:])
         self.data["Tau"] = ImageArray(tau)
@@ -262,15 +262,15 @@
     def write_fits(self, filename_prefix, clobber=True):
         r""" Export images to a FITS file. Writes the SZ distortion in all
         specified frequencies as well as the mass-weighted temperature and the
-        optical depth. Distance units are in kpc.  
-        
+        optical depth. Distance units are in kpc.
+
         Parameters
         ----------
         filename_prefix : string
             The prefix of the FITS filename.
         clobber : boolean, optional
             If the file already exists, do we overwrite?
-                    
+
         Examples
         --------
         >>> szprj.write_fits("SZbullet", clobber=False)
@@ -289,17 +289,17 @@
     def write_png(self, filename_prefix):
         r""" Export images to PNG files. Writes the SZ distortion in all
         specified frequencies as well as the mass-weighted temperature and the
-        optical depth. Distance units are in kpc. 
-        
+        optical depth. Distance units are in kpc.
+
         Parameters
         ----------
         filename_prefix : string
             The prefix of the image filenames.
-                
+
         Examples
         --------
         >>> szprj.write_png("SZsloshing")
-        """     
+        """
         extent = tuple([bound*self.pf.units["kpc"] for bound in self.bounds])
         for field, image in self.items():
             filename=filename_prefix+"_"+field+".png"
@@ -313,22 +313,22 @@
     @parallel_root_only
     def write_hdf5(self, filename):
         r"""Export the set of S-Z fields to a set of HDF5 datasets.
-        
+
         Parameters
         ----------
         filename : string
             This file will be opened in "write" mode.
-        
+
         Examples
         --------
-        >>> szprj.write_hdf5("SZsloshing.h5")                        
+        >>> szprj.write_hdf5("SZsloshing.h5")
         """
         import h5py
         f = h5py.File(filename, "w")
         for field, data in self.items():
             f.create_dataset(field,data=data)
         f.close()
-   
+
     def keys(self):
         return self.data.keys()
 
@@ -337,7 +337,7 @@
 
     def values(self):
         return self.data.values()
-    
+
     def has_key(self, key):
         return key in self.data.keys()
 

diff -r 49dbaa55f42acbaacf18cf108f7441633ca1eb7c -r 77f3afedfcc2db2b35879f572c36f8d6adf4915b yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -398,7 +398,8 @@
             center, pf, field_parameters)
         self.left_edge = np.array(left_edge)
         self.level = level
-        rdx = self.pf.domain_dimensions*self.pf.refine_by**level
+
+        rdx = self.pf.domain_dimensions*self.pf.relative_refinement(0, level)
         rdx[np.where(dims - 2 * num_ghost_zones <= 1)] = 1   # issue 602
         self.dds = self.pf.domain_width / rdx.astype("float64")
         self.ActiveDimensions = np.array(dims, dtype='int32')
@@ -488,9 +489,11 @@
         output_fields = [np.zeros(self.ActiveDimensions, dtype="float64")
                          for field in fields]
         domain_dims = self.pf.domain_dimensions.astype("int64") \
-                    * self.pf.refine_by**self.level
+                    * self.pf.relative_refinement(0, self.level)
         for chunk in self._data_source.chunks(fields, "io"):
             input_fields = [chunk[field] for field in fields]
+            # NOTE: This usage of "refine_by" is actually *okay*, because it's
+            # being used with respect to iref, which is *already* scaled!
             fill_region(input_fields, output_fields, self.level,
                         self.global_startindex, chunk.icoords, chunk.ires,
                         domain_dims, self.pf.refine_by)
@@ -647,10 +650,12 @@
         ls = self._initialize_level_state(fields)
         for level in range(self.level + 1):
             domain_dims = self.pf.domain_dimensions.astype("int64") \
-                        * self.pf.refine_by**level
+                        * self.pf.relative_refinement(0, level)
             for chunk in ls.data_source.chunks(fields, "io"):
                 chunk[fields[0]]
                 input_fields = [chunk[field] for field in fields]
+                # NOTE: This usage of "refine_by" is actually *okay*, because it's
+                # being used with respect to iref, which is *already* scaled!
                 fill_region(input_fields, ls.fields, ls.current_level,
                             ls.global_startindex, chunk.icoords,
                             chunk.ires, domain_dims, self.pf.refine_by)
@@ -682,14 +687,16 @@
     def _update_level_state(self, level_state):
         ls = level_state
         if ls.current_level >= self.level: return
+        rf = float(self.pf.relative_refinement(
+                    ls.current_level, ls.current_level + 1))
         ls.current_level += 1
-        ls.current_dx = self._base_dx / self.pf.refine_by**ls.current_level
+        ls.current_dx = self._base_dx / \
+            self.pf.relative_refinement(0, ls.current_level)
         self._setup_data_source(ls)
         LL = self.left_edge - self.pf.domain_left_edge
         ls.old_global_startindex = ls.global_startindex
         ls.global_startindex = np.rint(LL / ls.current_dx).astype('int64') - 1
         ls.domain_iwidth = np.rint(self.pf.domain_width/ls.current_dx).astype('int64') 
-        rf = float(self.pf.refine_by)
         input_left = (level_state.old_global_startindex + 0.5) * rf 
         width = (self.ActiveDimensions*self.dds)
         output_dims = np.rint(width/level_state.current_dx+0.5).astype("int32") + 2

diff -r 49dbaa55f42acbaacf18cf108f7441633ca1eb7c -r 77f3afedfcc2db2b35879f572c36f8d6adf4915b yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -56,6 +56,7 @@
         self.pf = self.hierarchy.parameter_file  # weakref already
         self._child_mask = self._child_indices = self._child_index_mask = None
         self.start_index = None
+        self.filename = filename
         self._last_mask = None
         self._last_count = -1
         self._last_selector_id = None

diff -r 49dbaa55f42acbaacf18cf108f7441633ca1eb7c -r 77f3afedfcc2db2b35879f572c36f8d6adf4915b yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -362,6 +362,9 @@
             raise RuntimeError
         return int(o2)
 
+    def relative_refinement(self, l0, l1):
+        return self.refine_by**(l1-l0)
+
 def _reconstruct_pf(*args, **kwargs):
     pfs = ParameterFileStore()
     pf = pfs.get_pf_hash(*args)

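With a uniform refinement ratio, this base implementation is just a power of
refine_by; frontends with per-level ratios (such as the Boxlib frontend
merged below) override it.  A doctest-style sketch:

    >>> pf.refine_by = 2              # illustrative
    >>> pf.relative_refinement(0, 3)  # factor between levels 0 and 3
    8
    >>> pf.relative_refinement(2, 2)
    1
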
diff -r 49dbaa55f42acbaacf18cf108f7441633ca1eb7c -r 77f3afedfcc2db2b35879f572c36f8d6adf4915b yt/frontends/boxlib/__init__.py
--- /dev/null
+++ b/yt/frontends/boxlib/__init__.py
@@ -0,0 +1,14 @@
+"""
+API for yt.frontends.boxlib
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------

diff -r 49dbaa55f42acbaacf18cf108f7441633ca1eb7c -r 77f3afedfcc2db2b35879f572c36f8d6adf4915b yt/frontends/boxlib/api.py
--- /dev/null
+++ b/yt/frontends/boxlib/api.py
@@ -0,0 +1,31 @@
+"""
+API for yt.frontends.boxlib
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from .data_structures import \
+      BoxlibGrid, \
+      BoxlibHierarchy, \
+      BoxlibStaticOutput, \
+      OrionHierarchy, \
+      OrionStaticOutput, \
+      CastroStaticOutput, \
+      MaestroStaticOutput
+
+from .fields import \
+      OrionFieldInfo, \
+      KnownOrionFields, \
+      add_orion_field
+
+from .io import \
+      IOHandlerBoxlib

diff -r 49dbaa55f42acbaacf18cf108f7441633ca1eb7c -r 77f3afedfcc2db2b35879f572c36f8d6adf4915b yt/frontends/boxlib/data_structures.py
--- /dev/null
+++ b/yt/frontends/boxlib/data_structures.py
@@ -0,0 +1,838 @@
+"""
+Data structures for Boxlib Codes 
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import os
+import re
+import weakref
+import itertools
+
+from collections import defaultdict
+from string import strip, rstrip
+from stat import ST_CTIME
+
+import numpy as np
+
+from yt.funcs import *
+from yt.data_objects.field_info_container import FieldInfoContainer, NullFunc
+from yt.data_objects.grid_patch import AMRGridPatch
+from yt.geometry.grid_geometry_handler import GridGeometryHandler
+from yt.data_objects.static_output import StaticOutput
+from yt.utilities.definitions import \
+    mpc_conversion, sec_conversion
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+    parallel_root_only
+from yt.utilities.lib import \
+    get_box_grids_level
+from yt.geometry.selection_routines import \
+    RegionSelector
+from yt.utilities.io_handler import \
+    io_registry
+
+from .definitions import \
+    orion2enzoDict, \
+    parameterDict
+from .fields import \
+    OrionFieldInfo, \
+    add_orion_field, \
+    KnownOrionFields
+from .io import IOHandlerBoxlib
+# This is what we use to find scientific notation that might include d's
+# instead of e's.
+_scinot_finder = re.compile(r"[-+]?[0-9]*\.?[0-9]+([eEdD][-+]?[0-9]+)?")
+# This is the dimensions in the Cell_H file for each level
+_dim_finder = re.compile(r"\(\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\) \(\d+,\d+,\d+\)\)$")
+# This is the line that prefixes each set of data for a FAB in the FAB file
+_header_pattern = re.compile(r"^FAB \(\(\d+, \([0-9 ]+\)\),\((\d+), " +
+                             r"\(([0-9 ]+)\)\)\)\(\((\d+,\d+,\d+)\) " +
+                             "\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\)\) (\d+)\n")
+
+
+
+class BoxlibGrid(AMRGridPatch):
+    _id_offset = 0
+    _offset = -1
+
+    def __init__(self, grid_id, offset, filename = None,
+                 hierarchy = None):
+        super(BoxlibGrid, self).__init__(grid_id, filename, hierarchy)
+        self._base_offset = offset
+        self._parent_id = []
+        self._children_ids = []
+
+    def _prepare_grid(self):
+        super(BoxlibGrid, self)._prepare_grid()
+        my_ind = self.id - self._id_offset
+        self.start_index = self.hierarchy.grid_start_index[my_ind]
+
+    def get_global_startindex(self):
+        return self.start_index
+
+    def _setup_dx(self):
+        # has already been read in and stored in hierarchy
+        my_ind = self.id - self._id_offset
+        self.dds = self.hierarchy.level_dds[self.Level,:]
+        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
+
+    def __repr__(self):
+        return "BoxlibGrid_%04i" % (self.id)
+
+    @property
+    def Parent(self):
+        if len(self._parent_id) == 0:
+            return None
+        return [self.hierarchy.grids[pid - self._id_offset]
+                for pid in self._parent_id]
+
+    @property
+    def Children(self):
+        return [self.hierarchy.grids[cid - self._id_offset]
+                for cid in self._children_ids]
+
+    def _seek(self, f):
+        # This will either seek to the _offset or figure out the correct
+        # _offset.
+        if self._offset == -1:
+            f.seek(self._base_offset, os.SEEK_SET)
+            f.readline()
+            self._offset = f.tell()
+        else:
+            f.seek(self._offset)
+
+    # We override here because we can have varying refinement levels
+    def select_ires(self, dobj):
+        mask = self._get_selector_mask(dobj.selector)
+        if mask is None: return np.empty(0, dtype='int64')
+        coords = np.empty(self._last_count, dtype='int64')
+        coords[:] = self.Level + self.pf.level_offsets[self.Level]
+        return coords
+
+    # Override this as well, since refine_by can vary
+    def _fill_child_mask(self, child, mask, tofill, dlevel = 1):
+        rf = self.pf.ref_factors[self.Level]
+        if dlevel != 1:
+            raise NotImplementedError
+        gi, cgi = self.get_global_startindex(), child.get_global_startindex()
+        startIndex = np.maximum(0, cgi / rf - gi)
+        endIndex = np.minimum((cgi + child.ActiveDimensions) / rf - gi,
+                              self.ActiveDimensions)
+        endIndex += (startIndex == endIndex)
+        mask[startIndex[0]:endIndex[0],
+             startIndex[1]:endIndex[1],
+             startIndex[2]:endIndex[2]] = tofill
+
+class BoxlibHierarchy(GridGeometryHandler):
+    grid = BoxlibGrid
+    def __init__(self, pf, data_style='boxlib_native'):
+        self.data_style = data_style
+        self.header_filename = os.path.join(pf.output_dir, 'Header')
+        self.directory = pf.output_dir
+
+        GridGeometryHandler.__init__(self, pf, data_style)
+        self._cache_endianness(self.grids[-1])
+        #self._read_particles()
+
+    def _parse_hierarchy(self):
+        """
+        read the global header file for a Boxlib plotfile output.
+        """
+        self.max_level = self.parameter_file._max_level
+        header_file = open(self.header_filename,'r')
+        # We can now skip to the point in the file we want to start parsing at.
+        header_file.seek(self.parameter_file._header_mesh_start)
+
+        dx = []
+        for i in range(self.max_level + 1):
+            dx.append([float(v) for v in header_file.next().split()])
+        self.level_dds = np.array(dx, dtype="float64")
+        if int(header_file.next()) != 0:
+            raise RunTimeError("yt only supports cartesian coordinates.")
+        if int(header_file.next()) != 0:
+            raise RunTimeError("INTERNAL ERROR! This should be a zero.")
+
+        # each level is one group with ngrids on it. each grid has 3 lines of 2 reals
+        self.grids = []
+        grid_counter = 0
+        for level in range(self.max_level + 1):
+            vals = header_file.next().split()
+            # should this be grid_time or level_time??
+            lev, ngrids, grid_time = int(vals[0]),int(vals[1]),float(vals[2])
+            assert(lev == level)
+            nsteps = int(header_file.next())
+            for gi in range(ngrids):
+                xlo, xhi = [float(v) for v in header_file.next().split()]
+                ylo, yhi = [float(v) for v in header_file.next().split()]
+                zlo, zhi = [float(v) for v in header_file.next().split()]
+                self.grid_left_edge[grid_counter + gi, :] = [xlo, ylo, zlo]
+                self.grid_right_edge[grid_counter + gi, :] = [xhi, yhi, zhi]
+            # Now we get to the level header filename, which we open and parse.
+            fn = os.path.join(self.parameter_file.output_dir,
+                              header_file.next().strip())
+            level_header_file = open(fn + "_H")
+            level_dir = os.path.dirname(fn)
+            # We skip the first two lines, although they probably contain
+            # useful information I don't know how to decipher.
+            level_header_file.next()
+            level_header_file.next()
+            # Now we get the number of data files
+            ncomp_this_file = int(level_header_file.next())
+            # Skip the next line, and we should get the number of grids / FABs
+            # in this file
+            level_header_file.next()
+            # To decipher this next line, we expect something like:
+            # (8 0
+            # where the first is the number of FABs in this level.
+            ngrids = int(level_header_file.next().split()[0][1:])
+            # Now we can iterate over each and get the indices.
+            for gi in range(ngrids):
+                # components within it
+                start, stop = _dim_finder.match(level_header_file.next()).groups()
+                start = np.array(start.split(","), dtype="int64")
+                stop = np.array(stop.split(","), dtype="int64")
+                dims = stop - start + 1
+                self.grid_dimensions[grid_counter + gi,:] = dims
+                self.grid_start_index[grid_counter + gi,:] = start
+            # Now we read two more lines.  The first of these is a close
+            # parenthesis.
+            level_header_file.next()
+            # This line I'm not 100% sure of, but it's either number of grids
+            # or number of FABfiles.
+            level_header_file.next()
+            # Now we iterate over grids to find their offsets in each file.
+            for gi in range(ngrids):
+                # Now we get the data file, at which point we're ready to
+                # create the grid.
+                dummy, filename, offset = level_header_file.next().split()
+                filename = os.path.join(level_dir, filename)
+                go = self.grid(grid_counter + gi, int(offset), filename, self)
+                go.Level = self.grid_levels[grid_counter + gi,:] = level
+                self.grids.append(go)
+            grid_counter += ngrids
+            # already read the filenames above...
+        self.float_type = 'float64'
+
+    def _cache_endianness(self,test_grid):
+        """
+        Cache the endianness and bytes per real of the grids by using a
+        test grid and assuming that all grids have the same
+        endianness. This is a pretty safe assumption since Boxlib uses
+        one file per processor, and if you're running on a cluster
+        with different endian processors, then you're on your own!
+        """
+        # open the test file & grab the header
+        with open(os.path.expanduser(test_grid.filename), 'rb') as f:
+            header = f.readline()
+        
+        bpr, endian, start, stop, centering, nc = \
+            _header_pattern.search(header).groups()
+        # Note that previously we were using a different value for BPR than we
+        # use now.  Here is an example set of information directly from BoxLib:
+        #  * DOUBLE data
+        #  * FAB ((8, (64 11 52 0 1 12 0 1023)),(8, (1 2 3 4 5 6 7 8)))((0,0) (63,63) (0,0)) 27
+        #  * FLOAT data
+        #  * FAB ((8, (32 8 23 0 1 9 0 127)),(4, (1 2 3 4)))((0,0) (63,63) (0,0)) 27
+        if bpr == endian[0]:
+            dtype = '<f%s' % bpr
+        elif bpr == endian[-1]:
+            dtype = '>f%s' % bpr
+        else:
+            raise ValueError("FAB header is neither big nor little endian. Perhaps the file is corrupt?")
+
+        mylog.debug("FAB header suggests dtype of %s", dtype)
+        self._dtype = np.dtype(dtype)
+
+    def _populate_grid_objects(self):
+        mylog.debug("Creating grid objects")
+        self.grids = np.array(self.grids, dtype='object')
+        self._reconstruct_parent_child()
+        for i, grid in enumerate(self.grids):
+            if (i%1e4) == 0: mylog.debug("Prepared % 7i / % 7i grids", i, self.num_grids)
+            grid._prepare_grid()
+            grid._setup_dx()
+        mylog.debug("Done creating grid objects")
+
+    def _reconstruct_parent_child(self):
+        mask = np.empty(len(self.grids), dtype='int32')
+        mylog.debug("First pass; identifying child grids")
+        for i, grid in enumerate(self.grids):
+            get_box_grids_level(self.grid_left_edge[i,:],
+                                self.grid_right_edge[i,:],
+                                self.grid_levels[i] + 1,
+                                self.grid_left_edge, self.grid_right_edge,
+                                self.grid_levels, mask)
+            ids = np.where(mask.astype("bool")) # where is a tuple
+            grid._children_ids = ids[0] + grid._id_offset 
+        mylog.debug("Second pass; identifying parents")
+        for i, grid in enumerate(self.grids): # Second pass
+            for child in grid.Children:
+                child._parent_id.append(i + grid._id_offset)
+
+    def _count_grids(self):
+        # We can get everything from the Header file, but note that we're
+        # duplicating some work done elsewhere.  In a future where we don't
+        # pre-allocate grid arrays, this becomes unnecessary.
+        header_file = open(self.header_filename, 'r')
+        header_file.seek(self.parameter_file._header_mesh_start)
+        # Skip over the level dxs, geometry and the zero:
+        [header_file.next() for i in range(self.parameter_file._max_level + 3)]
+        # Now we need to be very careful, as we've seeked, and now we iterate.
+        # Does this work?  We are going to count the number of places that we
+        # have a three-item line.  The three items would be level, number of
+        # grids, and then grid time.
+        self.num_grids = 0
+        for line in header_file:
+            if len(line.split()) != 3: continue
+            self.num_grids += int(line.split()[1])
+        
+    def _initialize_grid_arrays(self):
+        super(BoxlibHierarchy, self)._initialize_grid_arrays()
+        self.grid_start_index = np.zeros((self.num_grids,3), 'int64')
+
+    def _initialize_state_variables(self):
+        """override to not re-initialize num_grids in AMRHierarchy.__init__
+
+        """
+        self._parallel_locking = False
+        self._data_file = None
+        self._data_mode = None
+        self._max_locations = {}
+
+    def _detect_fields(self):
+        # This is all done in _parse_header_file
+        self.field_list = self.parameter_file._field_list[:]
+        self.field_indexes = dict((f, i)
+                                for i, f in enumerate(self.field_list))
+        # There are times when field_list may change.  We copy it here to
+        # avoid that possibility.
+        self.field_order = [f for f in self.field_list]
+
+    def _setup_classes(self):
+        dd = self._get_data_reader_dict()
+        GridGeometryHandler._setup_classes(self, dd)
+        self.object_types.sort()
+
+    def _setup_data_io(self):
+        self.io = io_registry[self.data_style](self.parameter_file)
+
+class BoxlibStaticOutput(StaticOutput):
+    """
+    This class is a stripped down class that simply reads and parses
+    *filename*, without looking at the Boxlib hierarchy.
+    """
+    _hierarchy_class = BoxlibHierarchy
+    _fieldinfo_fallback = OrionFieldInfo
+    _fieldinfo_known = KnownOrionFields
+    _output_prefix = None
+
+    # THIS SHOULD BE FIXED:
+    periodicity = (True, True, True)
+
+    def __init__(self, output_dir,
+                 cparam_filename = "inputs",
+                 fparam_filename = "probin",
+                 data_style='boxlib_native',
+                 storage_filename = None):
+        """
+        The parameter file is usually called "inputs", and there may be a
+        Fortran inputs file, usually called "probin".  The plotname here
+        will be a directory name.  As per BoxLib, data_style will be Native
+        (implemented here), IEEE (not yet implemented), or ASCII (not yet
+        implemented).
+        """
+        self.output_dir = os.path.abspath(os.path.expanduser(output_dir))
+        self.cparam_filename = self._localize_check(cparam_filename)
+        self.fparam_filename = self._localize_check(fparam_filename)
+        self.storage_filename = storage_filename
+
+        StaticOutput.__init__(self, output_dir, data_style)
+
+        # These are still used in a few places.
+        self.parameters["HydroMethod"] = 'boxlib'
+        self.parameters["Time"] = 1. # default unit is 1...
+        self.parameters["EOSType"] = -1 # default
+
+    def _localize_check(self, fn):
+        # If the file exists, use it.  If not, set it to None.
+        root_dir = os.path.dirname(self.output_dir)
+        full_fn = os.path.join(root_dir, fn)
+        if os.path.exists(full_fn):
+            return full_fn
+        return None
+
+    @classmethod
+    def _is_valid(cls, *args, **kwargs):
+        # fill our args
+        output_dir = args[0]
+        header_filename = os.path.join(output_dir, "Header")
+        jobinfo_filename = os.path.join(output_dir, "job_info")
+        if not os.path.exists(header_filename):
+            # We *know* it's not boxlib if Header doesn't exist.
+            return False
+        args = inspect.getcallargs(cls.__init__, args, kwargs)
+        # This might need to be localized somehow
+        inputs_filename = os.path.join(
+                            os.path.dirname(os.path.abspath(output_dir)),
+                            args['cparam_filename'])
+        if not os.path.exists(inputs_filename) and \
+           not os.path.exists(jobinfo_filename):
+            return True # We have no parameters to go off of
+        # If we do have either inputs or jobinfo, we should be deferring to a
+        # different frontend.
+        return False
+
+    def _parse_parameter_file(self):
+        """
+        Parses the parameter file and establishes the various
+        dictionaries.
+        """
+        self._parse_header_file()
+        # Let's read the file
+        hfn = os.path.join(self.output_dir, 'Header')
+        self.unique_identifier = int(os.stat(hfn)[ST_CTIME])
+        # the 'inputs' file is now optional
+        self._parse_cparams()
+        self._parse_fparams()
+
+    def _parse_cparams(self):
+        if self.cparam_filename is None:
+            return
+        for line in (line.split("#")[0].strip() for line in
+                     open(self.cparam_filename)):
+            if len(line) == 0: continue
+            param, vals = [s.strip() for s in line.split("=")]
+            if param == "amr.n_cell":
+                vals = self.domain_dimensions = np.array(vals.split(), dtype='int32')
+            elif param == "amr.ref_ratio":
+                vals = self.refine_by = int(vals[0])
+            elif param == "Prob.lo_bc":
+                vals = self.periodicity = ensure_tuple([i == 0 for i in vals.split()])
+            elif param == "castro.use_comoving":
+                vals = self.cosmological_simulation = int(vals)
+            else:
+                # Now we guess some things about the parameter and its type
+                v = vals.split()[0] # Just in case there are multiple; we'll go
+                                    # back afterward to using vals.
+                try:
+                    float(v.upper().replace("D","E"))
+                except:
+                    pcast = str
+                else:
+                    syms = (".", "D+", "D-", "E+", "E-")
+                    if any(sym in v.upper() for sym in syms for v in vals.split()):
+                        pcast = float
+                    else:
+                        pcast = int
+                vals = [pcast(v) for v in vals.split()]
+                if len(vals) == 1: vals = vals[0]
+            self.parameters[param] = vals
+
+        if getattr(self, "cosmological_simulation", 0) == 1:
+            self.omega_lambda = self.parameters["comoving_OmL"]
+            self.omega_matter = self.parameters["comoving_OmM"]
+            self.hubble_constant = self.parameters["comoving_h"]
+            a_file = open(os.path.join(self.output_dir,'comoving_a'))
+            line = a_file.readline().strip()
+            a_file.close()
+            self.current_redshift = 1/float(line) - 1
+        else:
+            self.current_redshift = self.omega_lambda = self.omega_matter = \
+                self.hubble_constant = self.cosmological_simulation = 0.0
+
+    def _parse_fparams(self):
+        """
+        Parses the fortran parameter file for Orion. Most of this will
+        be useless, but this is where it keeps mu = mass per
+        particle/m_hydrogen.
+        """
+        if self.fparam_filename is None:
+            return
+        for line in (l for l in open(self.fparam_filename) if "=" in l):
+            param, vals = [v.strip() for v in line.split("=")]
+            # Now, there are a couple different types of parameters.
+            # Some will be where you only have floating point values, others
+            # will be where things are specified as string literals.
+            # Unfortunately, we're also using Fortran values, which will have
+            # things like 1.d-2 which is pathologically difficult to parse if
+            # your C library doesn't include 'd' in its locale for strtod.
+            # So we'll try to determine this.
+            vals = vals.split()
+            if any(_scinot_finder.match(v) for v in vals):
+                vals = [float(v.replace("D","e").replace("d","e"))
+                        for v in vals]
+            if len(vals) == 1:
+                vals = vals[0]
+            self.parameters[param] = vals
+
+    def _parse_header_file(self):
+        """
+        We parse the Boxlib header, which we use as our basis.  Anything in the
+        inputs file will override this, but the inputs file is not strictly
+        necessary for orientation of the data in space.
+        """
+
+        # Note: Python uses a read-ahead buffer, so using .next(), which would
+        # be my preferred solution, won't work here.  We have to explicitly
+        # call readline() if we want to end up with an offset at the very end.
+        # Fortunately, elsewhere we don't care about the offset, so we're fine
+        # everywhere else using iteration exclusively.
+        header_file = open(os.path.join(self.output_dir,'Header'))
+        self.orion_version = header_file.readline().rstrip()
+        n_fields = int(header_file.readline())
+
+        self._field_list = [header_file.readline().strip()
+                           for i in range(n_fields)]
+
+        self.dimensionality = int(header_file.readline())
+        if self.dimensionality != 3:
+            raise RunTimeError("Boxlib 1D and 2D support not currently available.")
+        self.current_time = float(header_file.readline())
+        # This is traditionally a hierarchy attribute, so we will set it, but
+        # in a slightly hidden variable.
+        self._max_level = int(header_file.readline()) 
+        self.domain_left_edge = np.array(header_file.readline().split(),
+                                         dtype="float64")
+        self.domain_right_edge = np.array(header_file.readline().split(),
+                                         dtype="float64")
+        ref_factors = np.array([int(i) for i in
+                                header_file.readline().split()])
+        if ref_factors.size == 0:
+            # We use a default of two, as Nyx doesn't always output this value
+            ref_factors = [2] * (self._max_level + 1)
+        # We can't vary refinement factors based on dimension, or whatever
+        # else they are varied on.  One curious thing: I found that some
+        # Castro 3D data has only two refinement factors, which I don't know
+        # how to interpret.
+        self.ref_factors = ref_factors
+        if np.unique(ref_factors).size > 1:
+            # We want everything to be a multiple of this.
+            self.refine_by = min(ref_factors)
+            # Check that they're all multiples of the minimum.
+            if not all(float(rf)/self.refine_by ==
+                   int(float(rf)/self.refine_by) for rf in ref_factors):
+                raise RuntimeError
+            base_log = np.log2(self.refine_by)
+            self.level_offsets = [0] # level 0 has to have 0 offset
+            lo = 0
+            for lm1, rf in enumerate(self.ref_factors):
+                lo += int(np.log2(rf) / base_log) - 1
+                self.level_offsets.append(lo)
+        #assert(np.unique(ref_factors).size == 1)
+        else:
+            self.refine_by = ref_factors[0]
+            self.level_offsets = [0 for l in range(self._max_level + 1)]
+        # Now we read the global index space, to get the domain dimensions.
+        index_space = header_file.readline()
+        # This will be of the form:
+        #  ((0,0,0) (255,255,255) (0,0,0)) ((0,0,0) (511,511,511) (0,0,0))
+        # So note that if we split it all up based on spaces, we should be
+        # fine, as long as we take the first two entries, which correspond to
+        # the root level.  I'm not 100% pleased with this solution.
+        root_space = index_space.replace("(","").replace(")","").split()[:2]
+        start = np.array(root_space[0].split(","), dtype="int64")
+        stop = np.array(root_space[1].split(","), dtype="int64")
+        self.domain_dimensions = stop - start + 1
+        # Skip timesteps per level
+        header_file.readline()
+        self._header_mesh_start = header_file.tell()
+
+    def _set_units(self):
+        """
+        Generates the conversion to various physical units based on the parameter file
+        """
+        self.units = {}
+        self.time_units = {}
+        self._setup_nounits_units()
+        self.conversion_factors = defaultdict(lambda: 1.0)
+        self.time_units['1'] = 1
+        self.units['1'] = 1.0
+        self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
+        for unit in sec_conversion.keys():
+            self.time_units[unit] = 1.0 / sec_conversion[unit]
+
+    def _setup_nounits_units(self):
+        z = 0
+        mylog.warning("Setting 1.0 in code units to be 1.0 cm")
+        if not self.has_key("TimeUnits"):
+            mylog.warning("No time units.  Setting 1.0 = 1 second.")
+            self.conversion_factors["Time"] = 1.0
+        for unit in mpc_conversion.keys():
+            self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
+            
+    @parallel_root_only
+    def print_key_parameters(self):
+        for a in ["current_time", "domain_dimensions", "domain_left_edge",
+                  "domain_right_edge"]:
+            if not hasattr(self, a):
+                mylog.error("Missing %s in parameter file definition!", a)
+                continue
+            v = getattr(self, a)
+            mylog.info("Parameters: %-25s = %s", a, v)
+
+    def relative_refinement(self, l0, l1):
+        offset = self.level_offsets[l1] - self.level_offsets[l0]
+        return self.refine_by**(l1-l0 + offset)
+
+class OrionHierarchy(BoxlibHierarchy):
+    
+    def __init__(self, pf, data_style='orion_native'):
+        BoxlibHierarchy.__init__(self, pf, data_style)
+        self._read_particles()
+        #self.io = IOHandlerOrion
+
+    def _read_particles(self):
+        """
+        reads in particles and assigns them to grids. Will search for
+        Star particles, then sink particles if no star particle file
+        is found, and finally will simply note that no particles are
+        found if neither works. To add a new Orion particle type,
+        simply add it to the if/elif/else block.
+
+        """
+        self.grid_particle_count = np.zeros(len(self.grids))
+
+        for particle_filename in ["StarParticles", "SinkParticles"]:
+            fn = os.path.join(self.pf.output_dir, particle_filename)
+            if os.path.exists(fn): self._read_particle_file(fn)
+
+    def _read_particle_file(self, fn):
+        """actually reads the orion particle data file itself.
+
+        """
+        if not os.path.exists(fn): return
+        with open(fn, 'r') as f:
+            lines = f.readlines()
+            self.num_stars = int(lines[0].strip()[0])
+            for line in lines[1:]:
+                particle_position_x = float(line.split(' ')[1])
+                particle_position_y = float(line.split(' ')[2])
+                particle_position_z = float(line.split(' ')[3])
+                coord = [particle_position_x, particle_position_y, particle_position_z]
+                # for each particle, determine which grids contain it
+                # copied from object_finding_mixin.py
+                mask=np.ones(self.num_grids)
+                for i in xrange(len(coord)):
+                    np.choose(np.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
+                    np.choose(np.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
+                ind = np.where(mask == 1)
+                selected_grids = self.grids[ind]
+                # in orion, particles always live on the finest level.
+                # so, we want to assign the particle to the finest of
+                # the grids we just found
+                if len(selected_grids) != 0:
+                    grid = sorted(selected_grids, key=lambda grid: grid.Level)[-1]
+                    ind = np.where(self.grids == grid)[0][0]
+                    self.grid_particle_count[ind] += 1
+                    self.grids[ind].NumberOfParticles += 1
+        return True
+                
+class OrionStaticOutput(BoxlibStaticOutput):
+
+    _hierarchy_class = OrionHierarchy
+
+    def __init__(self, output_dir,
+                 cparam_filename = "inputs",
+                 fparam_filename = "probin",
+                 data_style='orion_native',
+                 storage_filename = None):
+
+        BoxlibStaticOutput.__init__(self, output_dir,
+                 cparam_filename, fparam_filename, data_style)
+          
+    @classmethod
+    def _is_valid(cls, *args, **kwargs):
+        # fill our args                                                                               
+        output_dir = args[0]
+        header_filename = os.path.join(output_dir, "Header")
+        jobinfo_filename = os.path.join(output_dir, "job_info")
+        if not os.path.exists(header_filename):
+            # We *know* it's not boxlib if Header doesn't exist.                                      
+            return False
+        args = inspect.getcallargs(cls.__init__, args, kwargs)
+        # This might need to be localized somehow                                                     
+        inputs_filename = os.path.join(
+                            os.path.dirname(os.path.abspath(output_dir)),
+                            args['cparam_filename'])
+        if not os.path.exists(inputs_filename):
+            return False
+        if os.path.exists(jobinfo_filename):
+            return False
+        # Now we check for all the others                                                             
+        lines = open(inputs_filename).readlines()
+        if any(("castro." in line for line in lines)): return False
+        if any(("nyx." in line for line in lines)): return False
+        if any(("geometry.prob_lo" in line for line in lines)): return True
+        return False
+
+class CastroStaticOutput(BoxlibStaticOutput):
+
+    @classmethod
+    def _is_valid(cls, *args, **kwargs):
+        # fill our args
+        output_dir = args[0]
+        header_filename = os.path.join(output_dir, "Header")
+        jobinfo_filename = os.path.join(output_dir, "job_info")
+        if not os.path.exists(header_filename):
+            # We *know* it's not boxlib if Header doesn't exist.
+            return False
+        args = inspect.getcallargs(cls.__init__, args, kwargs)
+        # This might need to be localized somehow
+        fparam_filename = os.path.join(
+                            os.path.dirname(os.path.abspath(output_dir)),
+                            args['fparam_filename'])
+        if not os.path.exists(jobinfo_filename):
+            return False
+        if os.path.exists(fparam_filename):
+            return False
+        # Now we check for all the others
+        lines = open(jobinfo_filename).readlines()
+        if any(line.startswith("Castro   ") for line in lines): return True
+        return False
+
+class MaestroStaticOutput(BoxlibStaticOutput):
+
+    @classmethod
+    def _is_valid(cls, *args, **kwargs):
+        # fill our args
+        output_dir = args[0]
+        header_filename = os.path.join(output_dir, "Header")
+        jobinfo_filename = os.path.join(output_dir, "job_info")
+        if not os.path.exists(header_filename):
+            # We *know* it's not boxlib if Header doesn't exist.
+            return False
+        args = inspect.getcallargs(cls.__init__, args, kwargs)
+        # This might need to be localized somehow
+        fparam_filename = os.path.join(
+                            os.path.dirname(os.path.abspath(output_dir)),
+                            args['fparam_filename'])
+        if not os.path.exists(jobinfo_filename):
+            return False
+        if not os.path.exists(fparam_filename):
+            return False
+        # Now we check for all the others
+        lines = open(jobinfo_filename).readlines()
+        # Maestro outputs have "Castro" in them
+        if any(line.startswith("Castro   ") for line in lines): return True
+        return False
+
+
+class NyxHierarchy(BoxlibHierarchy):
+
+    def __init__(self, pf, data_style='nyx_native'):
+        super(NyxHierarchy, self).__init__(pf, data_style)
+        self._read_particle_header()
+
+    def _read_particle_header(self):
+        if not self.pf.parameters["particles.write_in_plotfile"]:
+            self.pgrid_info = np.zeros((self.num_grids, 3), dtype='int64')
+            return
+        for fn in ['particle_position_%s' % ax for ax in 'xyz'] + \
+                  ['particle_mass'] +  \
+                  ['particle_velocity_%s' % ax for ax in 'xyz']:
+            self.field_list.append(("io", fn))
+        header = open(os.path.join(self.pf.output_dir, "DM", "Header"))
+        version = header.readline()
+        ndim = header.readline()
+        nfields = header.readline()
+        ntotalpart = int(header.readline())
+        dummy = header.readline() # nextid
+        maxlevel = int(header.readline()) # max level
+
+        # Skip over how many grids on each level; this is degenerate
+        for i in range(maxlevel + 1): dummy = header.readline()
+
+        grid_info = np.fromiter((int(i) for line in header.readlines()
+                                 for i in line.split()),
+                                dtype='int64',
+                                count=3*self.num_grids).reshape((self.num_grids, 3))
+        # we need grid_info in `populate_grid_objects`, so save it to self
+
+        for g, pg in itertools.izip(self.grids, grid_info):
+            g.particle_filename = os.path.join(self.pf.output_dir, "DM",
+                                               "Level_%s" % (g.Level),
+                                               "DATA_%04i" % pg[0])
+            g.NumberOfParticles = pg[1]
+            g._particle_offset = pg[2]
+
+        self.grid_particle_count[:, 0] = grid_info[:, 1]
+
+class NyxStaticOutput(BoxlibStaticOutput):
+
+    _hierarchy_class = NyxHierarchy
+
+    @classmethod
+    def _is_valid(cls, *args, **kwargs):
+        # fill our args
+        pname = args[0].rstrip("/")
+        dn = os.path.dirname(pname)
+        if len(args) > 1:
+            kwargs['paramFilename'] = args[1]
+
+        pfname = kwargs.get("paramFilename", os.path.join(dn, "inputs"))
+
+        # @todo: new Nyx output.
+        # We check for the job_info file's existence because this is currently
+        # what distinguishes Nyx data from MAESTRO data.
+        pfn = os.path.join(pfname)
+        if not os.path.exists(pfn): return False
+        nyx = any(("nyx." in line for line in open(pfn)))
+        maestro = os.path.exists(os.path.join(pname, "job_info"))
+        orion = (not nyx) and (not maestro)
+        return nyx
+
+    def _parse_parameter_file(self):
+        super(NyxStaticOutput, self)._parse_parameter_file()
+        #return
+        # Nyx is always cosmological.
+        self.cosmological_simulation = 1
+        self.omega_lambda = self.parameters["comoving_OmL"]
+        self.omega_matter = self.parameters["comoving_OmM"]
+        self.hubble_constant = self.parameters["comoving_h"]
+
+        # Read in the `comoving_a` file and parse the value. We should fix this
+        # in the new Nyx output format...
+        a_file = open(os.path.join(self.output_dir, "comoving_a"))
+        a_string = a_file.readline().strip()
+        a_file.close()
+
+        # Set the scale factor and redshift
+        self.cosmological_scale_factor = float(a_string)
+        self.parameters["CosmologyCurrentRedshift"] = 1 / float(a_string) - 1
+
+        # alias
+        self.current_redshift = self.parameters["CosmologyCurrentRedshift"]
+        if self.parameters["particles.write_in_plotfile"]:
+            self.particle_types = ("io",)
+            self.particle_types_raw = self.particle_types
+
+    def _set_units(self):
+        super(NyxStaticOutput, self)._set_units()
+        # Masses are always in $ M_{\odot} $
+        self.units["particle_mass"] = 1.989e33
+
+        mylog.warning("Length units: setting 1.0 = 1.0 Mpc.")
+        self.units.update(mpc_conversion)
+        self.units["density"] = self.units["particle_mass"]/(self.units["cm"])**3
+        self.units["particle_mass_density"] = self.units["density"]
+
+        mylog.warning("Time units: setting 1.0 = Mpc/km s ~ 10^12 yr .")
+        self.time_units["s"] = 1.0 / 3.08568025e19
+        self.conversion_factors["Time"] = 1.0 / 3.08568025e19
+
+        cf = 1e5 * (self.cosmological_scale_factor)
+        for ax in "xyz":
+            self.units["particle_velocity_%s" % ax] = cf
+
+        for unit in sec_conversion.keys():
+            self.time_units[unit] = self.time_units["s"] / sec_conversion[unit]
+

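The level_offsets bookkeeping in `_parse_header_file` above is what feeds
the `relative_refinement` override.  A runnable sketch on a hypothetical
header with mixed per-level ratios:

    import numpy as np

    ref_factors = [4, 2]              # hypothetical per-level ratios
    refine_by = min(ref_factors)      # 2; all ratios must be multiples of it
    base_log = np.log2(refine_by)     # 1.0
    level_offsets, lo = [0], 0        # level 0 always has zero offset
    for rf in ref_factors:
        lo += int(np.log2(rf) / base_log) - 1
        level_offsets.append(lo)
    # level_offsets == [0, 1, 1]

    def relative_refinement(l0, l1):
        offset = level_offsets[l1] - level_offsets[l0]
        return refine_by ** (l1 - l0 + offset)

    assert relative_refinement(0, 1) == 4   # level 0 -> 1 refines by 4
    assert relative_refinement(1, 2) == 2   # level 1 -> 2 refines by 2
    assert relative_refinement(0, 2) == 8   # composite: 4 * 2
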
diff -r 49dbaa55f42acbaacf18cf108f7441633ca1eb7c -r 77f3afedfcc2db2b35879f572c36f8d6adf4915b yt/frontends/boxlib/definitions.py
--- /dev/null
+++ b/yt/frontends/boxlib/definitions.py
@@ -0,0 +1,62 @@
+"""
+Various definitions for various other modules and routines
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+from yt.funcs import *
+
+# TODO: get rid of enzo parameters we do not need
+parameterDict = {"CosmologyCurrentRedshift": float,
+                 "CosmologyComovingBoxSize": float,
+                 "CosmologyOmegaMatterNow": float,
+                 "CosmologyOmegaLambdaNow": float,
+                 "CosmologyHubbleConstantNow": float,
+                 "CosmologyInitialRedshift": float,
+                 "DualEnergyFormalismEta1": float,
+                 "DualEnergyFormalismEta2": float,
+                 "MetaDataString": str,
+                 "HydroMethod": int,
+                 "DualEnergyFormalism": int,
+                 "InitialTime": float,
+                 "ComovingCoordinates": int,
+                 "DensityUnits": float,
+                 "LengthUnits": float,
+                 "LengthUnit": float,
+                 "TemperatureUnits": float,
+                 "TimeUnits": float,
+                 "GravitationalConstant": float,
+                 "Gamma": float,
+                 "MultiSpecies": int,
+                 "CompilerPrecision": str,
+                 "CurrentTimeIdentifier": int,
+                 "RefineBy": int,
+                 "BoundaryConditionName": str,
+                 "TopGridRank": int,
+                 "TopGridDimensions": int,
+                 "EOSSoundSpeed": float,
+                 "EOSType": int,
+                 "NumberOfParticleAttributes": int,
+                }
+
+
+# converts the Orion inputs file name to the Enzo/yt name expected
+# throughout the code. key is Orion name, value is Enzo/yt equivalent
+orion2enzoDict = {"amr.n_cell": "TopGridDimensions",
+                  "materials.gamma": "Gamma",
+                  "amr.ref_ratio": "RefineBy",
+                  "castro.use_comoving": "ComovingCoordinates",
+                  "castro.redshift_in": "CosmologyInitialRedshift",
+                  "comoving_OmL": "CosmologyOmegaLambdaNow",
+                  "comoving_OmM": "CosmologyOmegaMatterNow",
+                  "comoving_h": "CosmologyHubbleConstantNow"
+                  }
+

diff -r 49dbaa55f42acbaacf18cf108f7441633ca1eb7c -r 77f3afedfcc2db2b35879f572c36f8d6adf4915b yt/frontends/boxlib/fields.py
--- /dev/null
+++ b/yt/frontends/boxlib/fields.py
@@ -0,0 +1,183 @@
+"""
+Orion-specific fields
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+
+from yt.utilities.physical_constants import \
+    mh, kboltz
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, \
+    NullFunc, \
+    TranslationFunc, \
+    FieldInfo, \
+    ValidateParameter, \
+    ValidateDataField, \
+    ValidateProperty, \
+    ValidateSpatial, \
+    ValidateGridType
+import yt.fields.universal_fields
+
+
+KnownOrionFields = FieldInfoContainer()
+add_orion_field = KnownOrionFields.add_field
+
+OrionFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = OrionFieldInfo.add_field
+
+add_orion_field("density", function=NullFunc, take_log=True,
+                validators = [ValidateDataField("density")],
+                units=r"\rm{g}/\rm{cm}^3")
+KnownOrionFields["density"]._projected_units =r"\rm{g}/\rm{cm}^2"
+
+add_orion_field("eden", function=NullFunc, take_log=True,
+                validators = [ValidateDataField("eden")],
+                units=r"\rm{erg}/\rm{cm}^3")
+
+add_orion_field("xmom", function=NullFunc, take_log=False,
+                validators = [ValidateDataField("xmom")],
+                units=r"\rm{g}/\rm{cm^2\ s}")
+
+add_orion_field("ymom", function=NullFunc, take_log=False,
+                validators = [ValidateDataField("ymom")],
+                units=r"\rm{gm}/\rm{cm^2\ s}")
+
+add_orion_field("zmom", function=NullFunc, take_log=False,
+                validators = [ValidateDataField("zmom")],
+                units=r"\rm{g}/\rm{cm^2\ s}")
+
+translation_dict = {"x-velocity": "xvel",
+                    "y-velocity": "yvel",
+                    "z-velocity": "zvel",
+                    "Density": "density",
+                    "TotalEnergy": "eden",
+                    "Temperature": "temperature",
+                    "x-momentum": "xmom",
+                    "y-momentum": "ymom",
+                    "z-momentum": "zmom"
+                   }
+
+for f, v in translation_dict.items():
+    if v not in KnownOrionFields:
+        add_orion_field(v, function=NullFunc, take_log=False,
+                        validators=[ValidateDataField(v)])
+    ff = KnownOrionFields[v]
+    add_field(f, TranslationFunc(v),
+              take_log=KnownOrionFields[v].take_log,
+              units=ff._units, display_name=f)
+
+def _xVelocity(field, data):
+    """Generate x-velocity from x-momentum and density."""
+    return data["xmom"]/data["density"]
+add_orion_field("x-velocity", function=_xVelocity, take_log=False,
+                units=r'\rm{cm}/\rm{s}')
+
+def _yVelocity(field, data):
+    """Generate y-velocity from y-momentum and density."""
+    # A native "yvel" field, when present, could be returned directly;
+    # for now the velocity is always derived from momentum:
+    #try:
+    #    return data["yvel"]
+    #except KeyError:
+    return data["ymom"]/data["density"]
+add_orion_field("y-velocity", function=_yVelocity, take_log=False,
+                units=r'\rm{cm}/\rm{s}')
+
+def _zVelocity(field, data):
+    """Generate z-velocity from z-momentum and density."""
+    return data["zmom"]/data["density"]
+add_orion_field("z-velocity", function=_zVelocity, take_log=False,
+                units=r'\rm{cm}/\rm{s}')
+
+def _ThermalEnergy(field, data):
+    """generate thermal (gas energy). Dual Energy Formalism was
+        implemented by Stella, but this isn't how it's called, so I'll
+        leave that commented out for now.
+    """
+    #if data.pf["DualEnergyFormalism"]:
+    #    return data["GasEnergy"]
+    #else:
+    return data["TotalEnergy"] - 0.5 * data["density"] * (
+        data["x-velocity"]**2.0
+        + data["y-velocity"]**2.0
+        + data["z-velocity"]**2.0 )
+add_field("ThermalEnergy", function=_ThermalEnergy,
+                units=r"\rm{ergs}/\rm{cm^3}")
+
+def _Pressure(field,data):
+    """M{(Gamma-1.0)*e, where e is thermal energy density
+       NB: this will need to be modified for radiation
+    """
+    return (data.pf["Gamma"] - 1.0)*data["ThermalEnergy"]
+add_field("Pressure", function=_Pressure, units=r"\rm{dyne}/\rm{cm}^{2}")
+
+def _Temperature(field, data):
+    return ((data.pf["Gamma"] - 1.0) * data.pf["mu"] * mh *
+            data["ThermalEnergy"] / (kboltz * data["Density"]))
+add_field("Temperature", function=_Temperature, units=r"\rm{Kelvin}",
+          take_log=False)
+
+# particle fields
+
+def particle_func(p_field, dtype='float64'):
+    def _Particles(field, data):
+        io = data.hierarchy.io
+        if not data.NumberOfParticles > 0:
+            return np.array([], dtype=dtype)
+        else:
+            return io._read_particles(data, p_field).astype(dtype)
+
+    return _Particles
+
+_particle_field_list = ["mass", 
+                        "position_x",
+                        "position_y",
+                        "position_z",
+                        "momentum_x",
+                        "momentum_y",
+                        "momentum_z",
+                        "angmomen_x",
+                        "angmomen_y",
+                        "angmomen_z",
+                        "mlast",
+                        "r",
+                        "mdeut",
+                        "n",
+                        "mdot",
+                        "burnstate",
+                        "luminosity",
+                        "id"]
+
+for pf in _particle_field_list:
+    pfunc = particle_func("particle_%s" % (pf))
+    add_field("particle_%s" % pf, function=pfunc,
+              validators = [ValidateSpatial(0)],
+              particle_type=True)
+
+def _ParticleMass(field, data):
+    particles = data["particle_mass"].astype('float64')
+    return particles
+
+def _ParticleMassMsun(field, data):
+    particles = data["particle_mass"].astype('float64')
+    return particles/1.989e33
+
+add_field("ParticleMass",
+          function=_ParticleMass, validators=[ValidateSpatial(0)],
+          particle_type=True)
+add_field("ParticleMassMsun",
+          function=_ParticleMassMsun, validators=[ValidateSpatial(0)],
+          particle_type=True)
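
For reference, a frontend-specific derived field would follow the same pattern
as the velocity fields above; a minimal sketch within this module (yt already
supplies a universal VelocityMagnitude field, so this is illustrative only):

    def _VelocityMagnitude(field, data):
        # Speed from the three derived velocity components.
        return np.sqrt(data["x-velocity"]**2.0 +
                       data["y-velocity"]**2.0 +
                       data["z-velocity"]**2.0)
    add_field("VelocityMagnitude", function=_VelocityMagnitude,
              take_log=False, units=r'\rm{cm}/\rm{s}')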

diff -r 49dbaa55f42acbaacf18cf108f7441633ca1eb7c -r 77f3afedfcc2db2b35879f572c36f8d6adf4915b yt/frontends/boxlib/io.py
--- /dev/null
+++ b/yt/frontends/boxlib/io.py
@@ -0,0 +1,195 @@
+"""
+Orion data-file handling functions
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import os
+import numpy as np
+from yt.utilities.lib import \
+    read_castro_particles, \
+    read_and_seek
+from yt.utilities.io_handler import \
+           BaseIOHandler
+from yt.funcs import mylog, defaultdict
+
+class IOHandlerBoxlib(BaseIOHandler):
+
+    _data_style = "boxlib_native"
+
+    def __init__(self, pf, *args, **kwargs):
+        self.pf = pf
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        chunks = list(chunks)
+        if any((ftype != "gas" for ftype, fname in fields)):
+            raise NotImplementedError
+        rv = {}
+        for field in fields:
+            rv[field] = np.empty(size, dtype="float64")
+        ng = sum(len(c.objs) for c in chunks)
+        mylog.debug("Reading %s cells of %s fields in %s grids",
+                    size, [f2 for f1, f2 in fields], ng)
+        ind = 0
+        for chunk in chunks:
+            data = self._read_chunk_data(chunk, fields)
+            for g in chunk.objs:
+                for field in fields:
+                    ftype, fname = field
+                    ds = data[g.id].pop(fname)
+                    nd = g.select(selector, ds, rv[field], ind) # caches
+                ind += nd
+                data.pop(g.id)
+        return rv
+
+    def _read_chunk_data(self, chunk, fields):
+        data = {}
+        grids_by_file = defaultdict(list)
+        if len(chunk.objs) == 0: return data
+        for g in chunk.objs:
+            if g.filename is None:
+                continue
+            grids_by_file[g.filename].append(g)
+        dtype = self.pf.hierarchy._dtype
+        bpr = dtype.itemsize
+        field_list = set(f[1] for f in fields)
+        for filename in grids_by_file:
+            grids = grids_by_file[filename]
+            grids.sort(key = lambda a: a._offset)
+            f = open(filename, "rb")
+            for grid in grids:
+                data[grid.id] = {}
+                grid._seek(f)
+                count = grid.ActiveDimensions.prod()
+                size = count * bpr
+                for field in self.pf.hierarchy.field_order:
+                    if field in field_list:
+                        # We read it ...
+                        v = np.fromfile(f, dtype=dtype, count=count)
+                        v = v.reshape(grid.ActiveDimensions, order='F')
+                        data[grid.id][field] = v
+                    else:
+                        f.seek(size, os.SEEK_CUR)
+        return data
+
+class IOHandlerOrion(IOHandlerBoxlib):
+    _data_style = "orion_native"
+
+    def _read_particles(self, grid, field): 
+        """
+        parses the Orion Star Particle text files
+        
+        """
+
+        fn = grid.pf.fullplotdir + "/StarParticles"
+        if not os.path.exists(fn):
+            fn = grid.pf.fullplotdir + "/SinkParticles"
+
+        # Figure out the format of the particle file
+        with open(fn, 'r') as f:
+            lines = f.readlines()
+        line = lines[1]
+        
+        # The basic fields that all sink particles have
+        index = {'particle_mass': 0,
+                 'particle_position_x': 1,
+                 'particle_position_y': 2,
+                 'particle_position_z': 3,
+                 'particle_momentum_x': 4,
+                 'particle_momentum_y': 5,
+                 'particle_momentum_z': 6,
+                 'particle_angmomen_x': 7,
+                 'particle_angmomen_y': 8,
+                 'particle_angmomen_z': 9,
+                 'particle_mlast': 10,
+                 'particle_mdeut': 11,
+                 'particle_n': 12,
+                 'particle_mdot': 13,
+                 'particle_burnstate': 14,
+                 'particle_id': 15}
+
+        if len(line.strip().split()) == 11:
+            # these are vanilla sinks, do nothing
+            pass  
+
+        elif len(line.strip().split()) == 17:
+            # these are old-style stars, add stellar model parameters
+            index['particle_mlast']     = 10
+            index['particle_r']         = 11
+            index['particle_mdeut']     = 12
+            index['particle_n']         = 13
+            index['particle_mdot']      = 14
+            index['particle_burnstate'] = 15
+
+        elif len(line.strip().split()) == 18:
+            # these are the newer style, add luminosity as well
+            index['particle_mlast']     = 10
+            index['particle_r']         = 11
+            index['particle_mdeut']     = 12
+            index['particle_n']         = 13
+            index['particle_mdot']      = 14
+            index['particle_burnstate'] = 15
+            index['particle_luminosity']= 16
+
+        else:
+            # give a warning if none of the above apply:
+            mylog.warning('Could not determine the format of the particle output file')
+            mylog.warning('These results could be nonsense!')
+
+        def read(line, field):
+            # split on any whitespace, matching the column counting above
+            return float(line.strip().split()[index[field]])
+
+        with open(fn, 'r') as f:
+            lines = f.readlines()
+            particles = []
+            for line in lines[1:]:
+                if grid.NumberOfParticles > 0:
+                    coord = read(line, "particle_position_x"), \
+                            read(line, "particle_position_y"), \
+                            read(line, "particle_position_z") 
+                    if ( (grid.LeftEdge < coord).all() and 
+                         (coord <= grid.RightEdge).all() ):
+                        particles.append(read(line, field))
+        return np.array(particles)
+
+class IOHandlerCastro(IOHandlerBoxlib):
+    _data_style = "castro_native"
+
+    def _read_particle_field(self, grid, field):
+        # castro_particle_field_names is expected to come from elsewhere in
+        # the Castro frontend; its definition is not part of this diff.
+        filen = os.path.expanduser(grid.particle_filename)
+        off = grid._particle_offset
+        tr = np.zeros(grid.NumberOfParticles, dtype='float64')
+        read_castro_particles(filen, off,
+            castro_particle_field_names.index(field),
+            len(castro_particle_field_names),
+            tr)
+        return tr
+
+nyx_particle_field_names = ['particle_position_%s' % ax for ax in 'xyz'] + \
+                           ['particle_mass'] +  \
+                           ['particle_velocity_%s' % ax for ax in 'xyz']
+
+class IOHandlerNyx(IOHandlerBoxlib):
+    _data_style = "nyx_native"
+
+    def _read_particle_coords(self, chunks, ptf):
+        # Carried over from the old per-grid API; "grid" and "field" are
+        # not defined under the new chunk-based signature, so the old body
+        # cannot run as written and is preserved only as a sketch:
+        #   off = grid._particle_offset
+        #   filen = os.path.expanduser(grid.particle_filename)
+        #   tr = np.zeros(grid.NumberOfParticles, dtype='float64')
+        #   read_castro_particles(filen, off,
+        #                         nyx_particle_field_names.index(field),
+        #                         len(nyx_particle_field_names), tr)
+        raise NotImplementedError
+
+    def _read_particle_fields(self, chunks, ptf, fields):
+        pass  # chunk-based particle reading is not yet implemented for Nyx
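
A back-of-envelope check of the seek arithmetic that _read_chunk_data above
relies on: within each grid record the fields appear back-to-back in
hierarchy.field_order, each occupying ActiveDimensions.prod() * itemsize
bytes, so an unwanted field is skipped with a single relative seek
(hypothetical numbers):

    import numpy as np
    count = 16 * 16 * 16                  # e.g. ActiveDimensions.prod()
    bpr = np.dtype("float64").itemsize    # bytes per value, as in the handler
    size = count * bpr                    # bytes skipped per unwanted field
    assert size == 32768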

diff -r 49dbaa55f42acbaacf18cf108f7441633ca1eb7c -r 77f3afedfcc2db2b35879f572c36f8d6adf4915b yt/frontends/boxlib/setup.py
--- /dev/null
+++ b/yt/frontends/boxlib/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('boxlib', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config
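
This configuration function is consumed by numpy.distutils; the parent
yt/frontends/setup.py would presumably register the new package with
something like the following (illustrative, not part of this diff):

    config.add_subpackage("boxlib")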

diff -r 49dbaa55f42acbaacf18cf108f7441633ca1eb7c -r 77f3afedfcc2db2b35879f572c36f8d6adf4915b yt/frontends/boxlib/tests/test_orion.py
--- /dev/null
+++ b/yt/frontends/boxlib/tests/test_orion.py
@@ -0,0 +1,42 @@
+"""
+Orion frontend tests
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.testing import *
+from yt.utilities.answer_testing.framework import \
+    requires_pf, \
+    small_patch_amr, \
+    big_patch_amr, \
+    data_dir_load
+from yt.frontends.boxlib.api import OrionStaticOutput
+
+_fields = ("Temperature", "Density", "VelocityMagnitude", "DivV")
+
+radadvect = "RadAdvect/plt00000"
+@requires_pf(radadvect)
+def test_radadvect():
+    pf = data_dir_load(radadvect)
+    yield assert_equal, str(pf), "plt00000"
+    for test in small_patch_amr(radadvect, _fields):
+        test_radadvect.__name__ = test.description
+        yield test
+
+rt = "RadTube/plt00500"
+@requires_pf(rt)
+def test_radtube():
+    pf = data_dir_load(rt)
+    yield assert_equal, str(pf), "plt00500"
+    for test in small_patch_amr(rt, _fields):
+        test_radtube.__name__ = test.description
+        yield test
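
A test for an additional dataset would follow the same answer-testing pattern
as the two above; a sketch with a hypothetical plotfile path:

    # "MyDataset/plt00100" is illustrative only, not a dataset shipped
    # with the test suite.
    mydata = "MyDataset/plt00100"
    @requires_pf(mydata)
    def test_mydata():
        pf = data_dir_load(mydata)
        yield assert_equal, str(pf), "plt00100"
        for test in small_patch_amr(mydata, _fields):
            test_mydata.__name__ = test.description
            yield test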

diff -r 49dbaa55f42acbaacf18cf108f7441633ca1eb7c -r 77f3afedfcc2db2b35879f572c36f8d6adf4915b yt/frontends/castro/__init__.py
--- a/yt/frontends/castro/__init__.py
+++ /dev/null
@@ -1,14 +0,0 @@
-"""
-API for yt.frontends.castro
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------

diff -r 49dbaa55f42acbaacf18cf108f7441633ca1eb7c -r 77f3afedfcc2db2b35879f572c36f8d6adf4915b yt/frontends/castro/api.py
--- a/yt/frontends/castro/api.py
+++ /dev/null
@@ -1,26 +0,0 @@
-"""
-API for yt.frontends.castro
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from .data_structures import \
-      CastroGrid, \
-      CastroHierarchy, \
-      CastroStaticOutput
-
-from .fields import \
-      CastroFieldInfo, \
-      add_castro_field
-
-from .io import \
-      IOHandlerNative

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt-3.0/commits/e13b38de6299/
Changeset:   e13b38de6299
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-10 23:21:00
Summary:     Speed up ARTIO tests by increasing range size.
Affected #:  1 file

diff -r 77f3afedfcc2db2b35879f572c36f8d6adf4915b -r e13b38de62996e344241c92dbce1415e8bbd4935 yt/frontends/artio/tests/test_outputs.py
--- a/yt/frontends/artio/tests/test_outputs.py
+++ b/yt/frontends/artio/tests/test_outputs.py
@@ -30,6 +30,7 @@
 @requires_pf(sizmbhloz)
 def test_sizmbhloz():
     pf = data_dir_load(sizmbhloz)
+    pf.max_range = 1024*1024
     yield assert_equal, str(pf), "sizmbhloz-clref04SNth-rs9_a0.9011.art"
     dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
     for ds in dso:


https://bitbucket.org/yt_analysis/yt-3.0/commits/326a1f88a7b3/
Changeset:   326a1f88a7b3
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-10 23:49:12
Summary:     Removing ValidateDataField from RAMSES fields.
Affected #:  2 files

diff -r e13b38de62996e344241c92dbce1415e8bbd4935 -r 326a1f88a7b34d0472c82d4db364678f5b44ac0a yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -346,7 +346,8 @@
         for domain in self.domains:
             pfl.update(set(domain.particle_field_offsets.keys()))
         self.particle_field_list = list(pfl)
-        self.field_list = self.fluid_field_list + self.particle_field_list
+        self.field_list = [("gas", f) for f in self.fluid_field_list] \
+                        + self.particle_field_list
 
     def _setup_derived_fields(self):
         self._parse_cooling()
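
Since the particle fields are still plain strings here, field_list now holds a
mixture of ("gas", name) tuples and strings; a minimal sketch (hypothetical
list contents) of filtering on the new convention:

    field_list = [("gas", "Density"), ("gas", "Pressure"), "particle_mass"]
    gas_fields = [f[1] for f in field_list
                  if isinstance(f, tuple) and f[0] == "gas"]
    # -> ["Density", "Pressure"]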

diff -r e13b38de62996e344241c92dbce1415e8bbd4935 -r 326a1f88a7b34d0472c82d4db364678f5b44ac0a yt/frontends/ramses/fields.py
--- a/yt/frontends/ramses/fields.py
+++ b/yt/frontends/ramses/fields.py
@@ -57,8 +57,7 @@
 
 for f in known_ramses_fields:
     if f not in KnownRAMSESFields:
-        add_ramses_field(f, function=NullFunc, take_log=True,
-                  validators = [ValidateDataField(f)])
+        add_ramses_field(f, function=NullFunc, take_log=True)
 
 def dx(field, data):
     return data.fwidth[:,0]
@@ -176,16 +175,13 @@
              projected_units = r"\rm{g}/\rm{cm}^2")
     add_field("Comoving_%s_Density" % species,
              function=_SpeciesComovingDensity,
-             validators=ValidateDataField("%s_Density" % species),
              display_name="Comoving\/%s\/Density" % species)
     add_field("%s_Mass" % species, units=r"\rm{g}", 
               function=_SpeciesMass, 
-              validators=ValidateDataField("%s_Density" % species),
               display_name="%s\/Mass" % species)
     add_field("%s_MassMsun" % species, units=r"M_{\odot}", 
               function=_SpeciesMass, 
               convert_function=_convertCellMassMsun,
-              validators=ValidateDataField("%s_Density" % species),
               display_name="%s\/Mass" % species)
 
 _cool_axes = ("lognH", "logT", "logTeq")


https://bitbucket.org/yt_analysis/yt-3.0/commits/517f72f8633f/
Changeset:   517f72f8633f
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-10 23:51:23
Summary:     Fixing FLASH IO.
Affected #:  2 files

diff -r 326a1f88a7b34d0472c82d4db364678f5b44ac0a -r 517f72f8633fbd3e97a6f3c15dd4a8d6ac8b58d0 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -50,10 +50,6 @@
     def __repr__(self):
         return "FLASHGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
 
-    @property
-    def filename(self):
-        return None
-
 class FLASHHierarchy(GridGeometryHandler):
 
     grid = FLASHGrid

diff -r 326a1f88a7b34d0472c82d4db364678f5b44ac0a -r 517f72f8633fbd3e97a6f3c15dd4a8d6ac8b58d0 yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -25,7 +25,6 @@
     _data_style = "flash_hdf5"
 
     def __init__(self, pf):
-        self._num_per_stride = kwargs.pop("num_per_stride", 1000000)
         super(IOHandlerFLASH, self).__init__(pf)
         # Now we cache the particle fields
         self._handle = pf._handle
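
The removed line referenced kwargs in a signature that takes none, so
constructing the handler would have raised a NameError; a minimal reproduction
of that failure mode (hypothetical class name):

    class Broken(object):
        def __init__(self, pf):
            # NameError: "kwargs" is not defined in this scope
            self._num_per_stride = kwargs.pop("num_per_stride", 1000000)

    Broken(None)  # raises NameError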

Repository URL: https://bitbucket.org/yt_analysis/yt-3.0/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


