[yt-svn] commit/yt: 24 new changesets

commits-noreply at bitbucket.org
Tue Dec 31 14:59:23 PST 2013


24 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/e4d62d7e8695/
Changeset:   e4d62d7e8695
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-12-19 14:01:24
Summary:     Fix profiles and profile tests for 3.0.
Affected #:  1 file

diff -r 7026b9ddfc20c9f9f05e654fdbf84c89a42007d1 -r e4d62d7e8695f05bb135adb479b2ad04cc406dd4 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -757,8 +757,10 @@
     def add_fields(self, fields):
         fields = ensure_list(fields)
         temp_storage = ProfileFieldAccumulator(len(fields), self.size)
-        for g in parallel_objects(self.data_source._grids):
-            self._bin_grid(g, fields, temp_storage)
+        cfields = fields + list(self.bin_fields)
+        citer = self.data_source.chunks(cfields, "io")
+        for chunk in parallel_objects(citer):
+            self._bin_chunk(chunk, fields, temp_storage)
         self._finalize_storage(fields, temp_storage)
 
     def _finalize_storage(self, fields, temp_storage):
@@ -772,42 +774,35 @@
             self.field_data[field] = temp_storage.values[...,i]
             self.field_data[field][blank] = 0.0
         
-    def _bin_grid(self, grid, fields, storage):
+    def _bin_chunk(self, chunk, fields, storage):
         raise NotImplementedError
 
-    def _filter(self, bin_fields, cut_points):
-        # cut_points is initially just the points inside our region
+    def _filter(self, bin_fields):
+        # cut_points is set to be everything initially, but
         # we also want to apply a filtering based on min/max
-        filter = np.zeros(bin_fields[0].shape, dtype='bool')
-        filter[cut_points] = True
+        filter = np.ones(bin_fields[0].shape, dtype='bool')
         for (mi, ma), data in zip(self.bounds, bin_fields):
             filter &= (data > mi)
             filter &= (data < ma)
         return filter, [data[filter] for data in bin_fields]
         
-    def _get_data(self, grid, fields):
-        # Save the values in the grid beforehand.
-        old_params = grid.field_parameters
-        old_keys = grid.field_data.keys()
-        grid.field_parameters = self.data_source.field_parameters
-        # Now we ask our source which values to include
-        pointI = self.data_source._get_point_indices(grid)
-        bin_fields = [grid[bf] for bf in self.bin_fields]
+    def _get_data(self, chunk, fields):
+        # We are using chunks now, which will manage the field parameters and
+        # the like.
+        bin_fields = [chunk[bf] for bf in self.bin_fields]
         # We want to make sure that our fields are within the bounds of the
         # binning
-        filter, bin_fields = self._filter(bin_fields, pointI)
+        filter, bin_fields = self._filter(bin_fields)
         if not np.any(filter): return None
         arr = np.zeros((bin_fields[0].size, len(fields)), dtype="float64")
         for i, field in enumerate(fields):
-            arr[:,i] = grid[field][filter]
+            arr[:,i] = chunk[field][filter]
         if self.weight_field is not None:
-            weight_data = grid[self.weight_field]
+            weight_data = chunk[self.weight_field]
         else:
-            weight_data = np.ones(grid.ActiveDimensions, dtype="float64")
+            weight_data = np.ones(chunk.ires.size, dtype="float64")
         weight_data = weight_data[filter]
         # So that we can pass these into 
-        grid.field_parameters = old_params
-        grid.field_data = YTFieldData( [(k, grid.field_data[k]) for k in old_keys] )
         return arr, weight_data, bin_fields
 
     def __getitem__(self, key):
@@ -835,10 +830,10 @@
         self.bounds = ((self.x_bins[0], self.x_bins[-1]),)
         self.x = self.x_bins
 
-    def _bin_grid(self, grid, fields, storage):
-        gd = self._get_data(grid, fields)
-        if gd is None: return
-        fdata, wdata, (bf_x,) = gd
+    def _bin_chunk(self, chunk, fields, storage):
+        rv = self._get_data(chunk, fields)
+        if rv is None: return
+        fdata, wdata, (bf_x,) = rv
         bin_ind = np.digitize(bf_x, self.x_bins) - 1
         new_bin_profile1d(bin_ind, wdata, fdata,
                       storage.weight_values, storage.values,
@@ -867,8 +862,8 @@
         self.x = self.x_bins
         self.y = self.y_bins
 
-    def _bin_grid(self, grid, fields, storage):
-        rv = self._get_data(grid, fields)
+    def _bin_chunk(self, chunk, fields, storage):
+        rv = self._get_data(chunk, fields)
         if rv is None: return
         fdata, wdata, (bf_x, bf_y) = rv
         bin_ind_x = np.digitize(bf_x, self.x_bins) - 1
@@ -912,8 +907,8 @@
         self.y = self.y_bins
         self.z = self.z_bins
 
-    def _bin_grid(self, grid, fields, storage):
-        rv = self._get_data(grid, fields)
+    def _bin_chunk(self, chunk, fields, storage):
+        rv = self._get_data(chunk, fields)
         if rv is None: return
         fdata, wdata, (bf_x, bf_y, bf_z) = rv
         bin_ind_x = np.digitize(bf_x, self.x_bins) - 1
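
For reference, the new _filter logic above starts from an all-True mask
and trims to the binning bounds, rather than seeding the mask from
precomputed cut points.  A standalone sketch of that behavior (plain
NumPy, outside of yt; the function name here is illustrative):

    import numpy as np

    def filter_bin_fields(bin_fields, bounds):
        # Select everything initially, then trim to the (min, max)
        # bounds of each binning dimension, mirroring _filter above.
        mask = np.ones(bin_fields[0].shape, dtype="bool")
        for (mi, ma), data in zip(bounds, bin_fields):
            mask &= (data > mi)
            mask &= (data < ma)
        return mask, [data[mask] for data in bin_fields]

    x = np.random.random(1000)
    mask, (x_in,) = filter_bin_fields([x], [(0.1, 0.9)])
    assert x_in.min() > 0.1 and x_in.max() < 0.9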


https://bitbucket.org/yt_analysis/yt/commits/5ba43b796933/
Changeset:   5ba43b796933
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-12-19 14:22:37
Summary:     Move particle setup to base Stream output type.
Affected #:  1 file

diff -r e4d62d7e8695f05bb135adb479b2ad04cc406dd4 -r 5ba43b7969336b5f49af6319434de6db29cf818c yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -323,6 +323,17 @@
     def _skip_cache(self):
         return True
 
+    def _setup_particle_type(self, ptype):
+        orig = set(self.field_info.items())
+        particle_vector_functions(ptype,
+            ["particle_position_%s" % ax for ax in 'xyz'],
+            ["particle_velocity_%s" % ax for ax in 'xyz'],
+            self.field_info)
+        particle_deposition_functions(ptype,
+            "Coordinates", "particle_mass", self.field_info)
+        standard_particle_fields(self.field_info, ptype)
+        return [n for n, v in set(self.field_info.items()).difference(orig)]
+
 class StreamDictFieldHandler(dict):
 
     @property
@@ -751,17 +762,6 @@
     n_ref = 64
     over_refine_factor = 1
 
-    def _setup_particle_type(self, ptype):
-        orig = set(self.field_info.items())
-        particle_vector_functions(ptype,
-            ["particle_position_%s" % ax for ax in 'xyz'],
-            ["particle_velocity_%s" % ax for ax in 'xyz'],
-            self.field_info)
-        particle_deposition_functions(ptype,
-            "Coordinates", "particle_mass", self.field_info)
-        standard_particle_fields(self.field_info, ptype)
-        return [n for n, v in set(self.field_info.items()).difference(orig)]
-
 def load_particles(data, sim_unit_to_cm, bbox=None,
                       sim_time=0.0, periodicity=(True, True, True),
                       n_ref = 64, over_refine_factor = 1):
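
The return value of _setup_particle_type uses a before/after set
difference to report which field names the setup calls added.  The same
pattern, sketched with a plain dict standing in for the field registry
(the keys here are made up):

    registry = {"density": None}
    orig = set(registry.items())
    registry["particle_mass"] = None   # stands in for the setup helpers
    registry["particle_ones"] = None
    added = [n for n, v in set(registry.items()).difference(orig)]
    assert sorted(added) == ["particle_mass", "particle_ones"]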


https://bitbucket.org/yt_analysis/yt/commits/248a376d08a1/
Changeset:   248a376d08a1
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-12-19 14:57:51
Summary:     Unify the stream frontends slightly.

This changes the main particle type in Stream to be "io", in accordance with
other frontends, and it also reduces the disparity between stream IO for
particles and non-particles.  Note that because particles don't get assigned
until later, we add "additional fields" to the field detection step.
Affected #:  2 files

diff -r 5ba43b7969336b5f49af6319434de6db29cf818c -r 248a376d08a1ead4867ad91c711ac34f157a4278 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -247,7 +247,7 @@
         """
         
         particle_types = set_particle_types(data[0])
-        ftype = "all"
+        ftype = "io"
 
         for key in data[0].keys() :
             if key is "number_of_particles": continue
@@ -262,7 +262,7 @@
                 if fname in grid.field_data:
                     grid.field_data.pop(fname, None)
                 elif (ftype, fname) in grid.field_data:
-                    grid.field_data.pop( ("all", fname) )
+                    grid.field_data.pop( ("io", fname) )
                 self.stream_handler.fields[grid.id][fname] = data[i][fname]
             
         self._detect_fields()
@@ -335,9 +335,13 @@
         return [n for n, v in set(self.field_info.items()).difference(orig)]
 
 class StreamDictFieldHandler(dict):
+    _additional_fields = ()
 
     @property
-    def all_fields(self): return self[0].keys()
+    def all_fields(self): 
+        fields = list(self._additional_fields) + self[0].keys()
+        fields = list(set(fields))
+        return fields
 
 def set_particle_types(data) :
 
@@ -364,7 +368,7 @@
     if pf.h.num_grids > 1 :
 
         try:
-            x, y, z = (pdata["all","particle_position_%s" % ax] for ax in 'xyz')
+            x, y, z = (pdata["io","particle_position_%s" % ax] for ax in 'xyz')
         except KeyError:
             raise KeyError("Cannot decompose particle data without position fields!")
         
@@ -461,7 +465,13 @@
         pdata["number_of_particles"] = number_of_particles
         for key in data.keys() :
             if len(data[key].shape) == 1 :
-                pdata[key] = data.pop(key)
+                if not isinstance(key, tuple):
+                    field = ("io", key)
+                    mylog.debug("Reassigning '%s' to '%s'", key, field)
+                else:
+                    field = key
+                sfh._additional_fields += field
+                pdata[field] = data.pop(key)
     else :
         particle_types = {}
     
@@ -519,12 +529,11 @@
     # Now figure out where the particles go
 
     if number_of_particles > 0 :
-        if ("all", "particle_position_x") not in pdata:
+        if ("io", "particle_position_x") not in pdata:
             pdata_ftype = {}
             for f in [k for k in sorted(pdata)]:
                 if not hasattr(pdata[f], "shape"): continue
-                mylog.debug("Reassigning '%s' to ('all','%s')", f, f)
-                pdata_ftype["all",f] = pdata.pop(f)
+                pdata_ftype["io",f] = pdata.pop(f)
             pdata_ftype.update(pdata)
             pdata = pdata_ftype
         assign_particle_data(spf, pdata)
@@ -723,12 +732,12 @@
     # Now reassign particle data to grids
 
     if number_of_particles > 0:
-        if ("all", "particle_position_x") not in pdata:
+        if ("io", "particle_position_x") not in pdata:
             pdata_ftype = {}
             for f in [k for k in sorted(pdata)]:
                 if not hasattr(pdata[f], "shape"): continue
-                mylog.debug("Reassigning '%s' to ('all','%s')", f, f)
-                pdata_ftype["all",f] = pdata.pop(f)
+                mylog.debug("Reassigning '%s' to ('io','%s')", f, f)
+                pdata_ftype["io",f] = pdata.pop(f)
             pdata_ftype.update(pdata)
             pdata = pdata_ftype
         assign_particle_data(pf, pdata)
@@ -874,7 +883,7 @@
         self.oct_handler = oct_handler
         self._last_mask = None
         self._last_selector_id = None
-        self._current_particle_type = 'all'
+        self._current_particle_type = 'io'
         self._current_fluid_type = self.pf.default_fluid_type
         self.base_region = base_region
         self.base_selector = base_region.selector

diff -r 5ba43b7969336b5f49af6319434de6db29cf818c -r 248a376d08a1ead4867ad91c711ac34f157a4278 yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -66,117 +66,40 @@
                     ind += g.select(selector, ds, rv[field], ind) # caches
         return rv
 
-    def _read_particle_selection(self, chunks, selector, fields):
+    def _read_particle_coords(self, chunks, ptf):
         chunks = list(chunks)
-        if any((ftype != "all" for ftype, fname in fields)):
-            raise NotImplementedError
-        rv = {}
-        # Now we have to do something unpleasant
-        mylog.debug("First pass: counting particles.")
-        size = 0
-        pfields = [("all", "particle_position_%s" % ax) for ax in 'xyz']
         for chunk in chunks:
             for g in chunk.objs:
                 if g.NumberOfParticles == 0: continue
                 gf = self.fields[g.id]
-                # Sometimes the stream operator won't have the 
-                # ("all", "Something") fields, but instead just "Something".
-                pns = []
-                for pn in pfields:
-                    if pn in gf: pns.append(pn)
-                    else: pns.append(pn[1])
-                size += g.count_particles(selector, 
-                    gf[pns[0]], gf[pns[1]], gf[pns[2]])
-        for field in fields:
-            # TODO: figure out dataset types
-            rv[field] = np.empty(size, dtype='float64')
-        ng = sum(len(c.objs) for c in chunks)
-        mylog.debug("Reading %s points of %s fields in %s grids",
-                   size, [f2 for f1, f2 in fields], ng)
-        ind = 0
+                for ptype, field_list in sorted(ptf.items()):
+                    x, y, z  = (gf[ptype, "particle_position_%s" % ax]
+                                for ax in 'xyz')
+                    yield ptype, (x, y, z)
+
+    def _read_particle_fields(self, chunks, ptf, selector):
+        chunks = list(chunks)
         for chunk in chunks:
             for g in chunk.objs:
                 if g.NumberOfParticles == 0: continue
                 gf = self.fields[g.id]
-                pns = []
-                for pn in pfields:
-                    if pn in gf: pns.append(pn)
-                    else: pns.append(pn[1])
-                mask = g.select_particles(selector,
-                    gf[pns[0]], gf[pns[1]], gf[pns[2]])
-                if mask is None: continue
-                for field in set(fields):
-                    if field in gf:
-                        fn = field
-                    else:
-                        fn = field[1]
-                    gdata = gf[fn][mask]
-                    rv[field][ind:ind+gdata.size] = gdata
-                ind += gdata.size
-        return rv
+                for ptype, field_list in sorted(ptf.items()):
+                    x, y, z  = (gf[ptype, "particle_position_%s" % ax]
+                                for ax in 'xyz')
+                    mask = selector.select_points(x, y, z)
+                    if mask is None: continue
+                    for field in field_list:
+                        data = np.asarray(gf[ptype, field])
+                        yield (ptype, field), data[mask]
 
     @property
     def _read_exception(self):
         return KeyError
 
-class StreamParticleIOHandler(BaseIOHandler):
+class StreamParticleIOHandler(IOHandlerStream):
 
     _data_style = "stream_particles"
 
-    def __init__(self, pf):
-        self.fields = pf.stream_handler.fields
-        super(StreamParticleIOHandler, self).__init__(pf)
-
-    def _read_particle_selection(self, chunks, selector, fields):
-        rv = {}
-        # We first need a set of masks for each particle type
-        ptf = defaultdict(list)
-        psize = defaultdict(lambda: 0)
-        chunks = list(chunks)
-        for ftype, fname in fields:
-            ptf[ftype].append(fname)
-        # For this type of file, we actually have something slightly different.
-        # We are given a list of ParticleDataChunks, which is composed of
-        # individual ParticleOctreeSubsets.  The data_files attribute on these
-        # may in fact overlap.  So we will iterate over a union of all the
-        # data_files.
-        data_files = set([])
-        for chunk in chunks:
-            for obj in chunk.objs:
-                data_files.update(obj.data_files)
-        for data_file in data_files:
-            f = self.fields[data_file.filename]
-            # This double-reads
-            for ptype, field_list in sorted(ptf.items()):
-                assert(ptype == "all")
-                psize[ptype] += selector.count_points(
-                        f["particle_position_x"],
-                        f["particle_position_y"],
-                        f["particle_position_z"])
-        # Now we have all the sizes, and we can allocate
-        ind = {}
-        for field in fields:
-            mylog.debug("Allocating %s values for %s", psize[field[0]], field)
-            rv[field] = np.empty(psize[field[0]], dtype="float64")
-            ind[field] = 0
-        for data_file in data_files:
-            f = self.fields[data_file.filename]
-            for ptype, field_list in sorted(ptf.items()):
-                assert(ptype == "all")
-                mask = selector.select_points(
-                        f["particle_position_x"],
-                        f["particle_position_y"],
-                        f["particle_position_z"])
-                if mask is None: continue
-                for field in field_list:
-                    data = f[field][mask,...]
-                    my_ind = ind[ptype, field]
-                    mylog.debug("Filling from %s to %s with %s",
-                        my_ind, my_ind+data.shape[0], field)
-                    rv[ptype, field][my_ind:my_ind + data.shape[0],...] = data
-                    ind[ptype, field] += data.shape[0]
-        return rv
-
     def _initialize_index(self, data_file, regions):
         # self.fields[g.id][fname] is the pattern here
         pos = np.column_stack(self.fields[data_file.filename][
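
The user-visible consequence of this changeset is that bare particle
field names are promoted to ("io", name) tuples.  A minimal standalone
sketch of that reassignment (the dict contents are made up):

    data = {"particle_position_x": [0.5],
            ("io", "particle_mass"): [1.0]}
    pdata = {}
    for key in list(data):
        field = key if isinstance(key, tuple) else ("io", key)
        pdata[field] = data.pop(key)
    assert ("io", "particle_position_x") in pdata
    assert ("io", "particle_mass") in pdata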


https://bitbucket.org/yt_analysis/yt/commits/a44df5d77c00/
Changeset:   a44df5d77c00
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-12-19 14:58:13
Summary:     Enable testing with particle fields in fake_random_pf.
Affected #:  1 file

diff -r 248a376d08a1ead4867ad91c711ac34f157a4278 -r a44df5d77c0038deb22a056f3f4b56a94e7d6006 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -138,7 +138,7 @@
     return left, right, level
 
 def fake_random_pf(ndims, peak_value = 1.0, fields = ("Density",),
-                   negative = False, nprocs = 1):
+                   negative = False, nprocs = 1, particles = 0):
     from yt.data_objects.api import data_object_registry
     from yt.frontends.stream.api import load_uniform_grid
     if not iterable(ndims):
@@ -156,6 +156,13 @@
             offsets.append(0.0)
     data = dict((field, (np.random.random(ndims) - offset) * peak_value)
                  for field,offset in zip(fields,offsets))
+    if particles:
+        for f in ('particle_position_%s' % ax for ax in 'xyz'):
+            data[f] = np.random.uniform(size = particles)
+        for f in ('particle_velocity_%s' % ax for ax in 'xyz'):
+            data[f] = np.random.random(size = particles) - 0.5
+        data['particle_mass'] = np.random.random(particles)
+        data['number_of_particles'] = particles
     ug = load_uniform_grid(data, ndims, 1.0, nprocs = nprocs)
     return ug
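
With the new keyword, a test can request particles alongside the fluid
fields; usage along these lines (as exercised by the particle profile
tests later in this series, assuming the yt-3.0 branch at this
changeset):

    from yt.testing import fake_random_pf
    pf = fake_random_pf(32, nprocs=4, particles=32**3)
    dd = pf.h.all_data()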
 


https://bitbucket.org/yt_analysis/yt/commits/aa9a9335f1fe/
Changeset:   aa9a9335f1fe
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-12-19 14:59:09
Summary:     Adding ProfileND subclasses to api and mods.
Affected #:  2 files

diff -r a44df5d77c0038deb22a056f3f4b56a94e7d6006 -r aa9a9335f1fe2a7827e90d1d65b6d70fc555a05b yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -32,7 +32,10 @@
     BinnedProfile1D, \
     BinnedProfile2D, \
     BinnedProfile3D, \
-    create_profile
+    create_profile, \
+    Profile1D, \
+    Profile2D, \
+    Profile3D
 
 from time_series import \
     TimeSeriesData, \

diff -r a44df5d77c0038deb22a056f3f4b56a94e7d6006 -r aa9a9335f1fe2a7827e90d1d65b6d70fc555a05b yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -54,7 +54,8 @@
     ValidateParameter, ValidateDataField, ValidateProperty, \
     ValidateSpatial, ValidateGridType, \
     TimeSeriesData, AnalysisTask, analysis_task, \
-    ImageArray, particle_filter, create_profile
+    ImageArray, particle_filter, create_profile, \
+    Profile1D, Profile2D, Profile3D
 
 from yt.data_objects.derived_quantities import \
     add_quantity, quantity_info
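
After this changeset the profile classes are importable from the top
level, e.g.:

    from yt.mods import Profile1D, Profile2D, Profile3D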


https://bitbucket.org/yt_analysis/yt/commits/d0ce0317d2b4/
Changeset:   d0ce0317d2b4
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-12-19 15:04:17
Summary:     Adding more profile tests for particle fields.
Affected #:  1 file

diff -r aa9a9335f1fe2a7827e90d1d65b6d70fc555a05b -r d0ce0317d2b44a36429c2901b9c7db310f7972d2 yt/data_objects/tests/test_profiles.py
--- a/yt/data_objects/tests/test_profiles.py
+++ b/yt/data_objects/tests/test_profiles.py
@@ -152,3 +152,25 @@
         p3d.add_fields(["Ones"])
         yield assert_equal, p3d["Ones"], np.ones((nb,nb,nb))
 
+def test_particle_profiles():
+    for nproc in [1, 2, 4, 8]:
+        pf = fake_random_pf(32, nprocs=nproc, particles = 32**3)
+        dd = pf.h.all_data()
+
+        p1d = Profile1D(dd, "particle_position_x", 128,
+                        0.0, 1.0, False, weight_field = None)
+        p1d.add_fields(["particle_ones"])
+        yield assert_equal, p1d["particle_ones"].sum(), 32**3
+
+        p2d = Profile2D(dd, "particle_position_x", 128, 0.0, 1.0, False,
+                            "particle_position_y", 128, 0.0, 1.0, False,
+                        weight_field = None)
+        p2d.add_fields(["particle_ones"])
+        yield assert_equal, p2d["particle_ones"].sum(), 32**3
+
+        p3d = Profile3D(dd, "particle_position_x", 128, 0.0, 1.0, False,
+                            "particle_position_y", 128, 0.0, 1.0, False,
+                            "particle_position_z", 128, 0.0, 1.0, False,
+                        weight_field = None)
+        p3d.add_fields(["particle_ones"])
+        yield assert_equal, p3d["particle_ones"].sum(), 32**3


https://bitbucket.org/yt_analysis/yt/commits/53da801b2cf9/
Changeset:   53da801b2cf9
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-12-19 15:33:22
Summary:     Reassign particle type names earlier in load_particles.
Affected #:  1 file

diff -r d0ce0317d2b44a36429c2901b9c7db310f7972d2 -r 53da801b2cf9e76b96f03f2d7b9b2a5c35e0407f yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -828,8 +828,18 @@
 
     sfh = StreamDictFieldHandler()
     
+    pdata = {}
+    for key in data.keys() :
+        if not isinstance(key, tuple):
+            field = ("io", key)
+            mylog.debug("Reassigning '%s' to '%s'", key, field)
+        else:
+            field = key
+        pdata[field] = data[key]
+        sfh._additional_fields += field
+    data = pdata # Drop reference count
     particle_types = set_particle_types(data)
-    
+
     sfh.update({'stream_file':data})
     grid_left_edges = domain_left_edge
     grid_right_edges = domain_right_edge


https://bitbucket.org/yt_analysis/yt/commits/5c6046f8f073/
Changeset:   5c6046f8f073
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-12-19 15:41:55
Summary:     Splitting back up the IO handlers.
Affected #:  1 file

diff -r 53da801b2cf9e76b96f03f2d7b9b2a5c35e0407f -r 5c6046f8f073e12f75de477fc4536e3ba6051902 yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -96,14 +96,50 @@
     def _read_exception(self):
         return KeyError
 
-class StreamParticleIOHandler(IOHandlerStream):
+class StreamParticleIOHandler(BaseIOHandler):
 
     _data_style = "stream_particles"
 
+    def __init__(self, pf):
+        self.fields = pf.stream_handler.fields
+        super(StreamParticleIOHandler, self).__init__(pf)
+
+    def _read_particle_coords(self, chunks, ptf):
+        chunks = list(chunks)
+        data_files = set([])
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        for data_file in data_files:
+            f = self.fields[data_file.filename]
+            # This double-reads
+            for ptype, field_list in sorted(ptf.items()):
+                yield ptype, (f[ptype, "particle_position_x"],
+                              f[ptype, "particle_position_y"],
+                              f[ptype, "particle_position_z"])
+            
+    def _read_particle_fields(self, chunks, ptf, selector):
+        data_files = set([])
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        for data_file in data_files:
+            f = self.fields[data_file.filename]
+            for ptype, field_list in sorted(ptf.items()):
+                x, y, z = (f[ptype, "particle_position_%s" % ax]
+                           for ax in 'xyz')
+                mask = selector.select_points(x, y, z)
+                if mask is None: continue
+                for field in field_list:
+                    data = f[ptype, field][mask]
+                    yield (ptype, field), data
+
+
     def _initialize_index(self, data_file, regions):
         # self.fields[g.id][fname] is the pattern here
         pos = np.column_stack(self.fields[data_file.filename][
-                              "particle_position_%s" % ax] for ax in 'xyz')
+                              ("io", "particle_position_%s" % ax)]
+                              for ax in 'xyz')
         if np.any(pos.min(axis=0) < data_file.pf.domain_left_edge) or \
            np.any(pos.max(axis=0) > data_file.pf.domain_right_edge):
             raise YTDomainOverflow(pos.min(axis=0), pos.max(axis=0),
@@ -117,11 +153,11 @@
         return morton
 
     def _count_particles(self, data_file):
-        npart = self.fields[data_file.filename]["particle_position_x"].size
-        return {'all': npart}
+        npart = self.fields[data_file.filename]["io", "particle_position_x"].size
+        return {'io': npart}
 
     def _identify_fields(self, data_file):
-        return [ ("all", k) for k in self.fields[data_file.filename].keys()]
+        return self.fields[data_file.filename].keys()
 
 class IOHandlerStreamHexahedral(BaseIOHandler):
     _data_style = "stream_hexahedral"
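
The restored handler follows the two-pass generator protocol:
_read_particle_coords yields (ptype, (x, y, z)) for counting and
selection, and _read_particle_fields yields ((ptype, field), data) with
the selector mask applied.  A schematic, self-contained sketch of a
handler obeying that protocol (SketchParticleIO and BoxSelector are
hypothetical stand-ins, not yt classes):

    import numpy as np

    class BoxSelector:
        # Minimal stand-in for a yt selector: keep points in (lo, hi).
        def __init__(self, lo, hi):
            self.lo, self.hi = lo, hi
        def select_points(self, x, y, z):
            m = ((x > self.lo) & (x < self.hi) &
                 (y > self.lo) & (y < self.hi) &
                 (z > self.lo) & (z < self.hi))
            return m if m.any() else None

    class SketchParticleIO:
        def __init__(self, fields_by_file):
            # {filename: {(ptype, fname): array}}
            self.fields = fields_by_file

        def _read_particle_coords(self, data_files, ptf):
            for data_file in data_files:
                f = self.fields[data_file]
                for ptype in sorted(ptf):
                    yield ptype, tuple(
                        f[ptype, "particle_position_%s" % ax]
                        for ax in "xyz")

        def _read_particle_fields(self, data_files, ptf, selector):
            for data_file in data_files:
                f = self.fields[data_file]
                for ptype, field_list in sorted(ptf.items()):
                    x, y, z = (f[ptype, "particle_position_%s" % ax]
                               for ax in "xyz")
                    mask = selector.select_points(x, y, z)
                    if mask is None: continue
                    for field in field_list:
                        yield (ptype, field), f[ptype, field][mask]

    np.random.seed(0)
    fields = {"f1": dict(
        [(("io", "particle_position_%s" % ax), np.random.random(10))
         for ax in "xyz"] + [(("io", "particle_mass"), np.ones(10))])}
    io = SketchParticleIO(fields)
    for key, data in io._read_particle_fields(
            ["f1"], {"io": ["particle_mass"]}, BoxSelector(0.25, 0.75)):
        print(key, data.size)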


https://bitbucket.org/yt_analysis/yt/commits/aaed035a79b3/
Changeset:   aaed035a79b3
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-12-19 15:56:54
Summary:     Fix failing test by being explicit.

dobj["all", "Coordinates"] is currently not working for load_particles()
datasets.  I am not entirely certain why, but it does work in the unitrefactor,
and I think the field systems have diverged enough that I do not want to patch
it here, since we will transition to that branch at some point in the near future.
Affected #:  1 file

diff -r 5c6046f8f073e12f75de477fc4536e3ba6051902 -r aaed035a79b3dbbc4de08d8ec571e5acd5ea9b90 yt/geometry/tests/test_particle_octree.py
--- a/yt/geometry/tests/test_particle_octree.py
+++ b/yt/geometry/tests/test_particle_octree.py
@@ -88,7 +88,7 @@
     for n_ref in [16, 32, 64, 512, 1024]:
         pf = load_particles(data, 1.0, bbox = bbox, n_ref = n_ref)
         dd = pf.h.all_data()
-        bi = dd["all","mesh_id"]
+        bi = dd["io","mesh_id"]
         v = np.bincount(bi.astype("int64"))
         yield assert_equal, v.max() <= n_ref, True
 


https://bitbucket.org/yt_analysis/yt/commits/66292b6addc4/
Changeset:   66292b6addc4
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-12-19 16:08:40
Summary:     These need to be tuples.
Affected #:  1 file

diff -r aaed035a79b3dbbc4de08d8ec571e5acd5ea9b90 -r 66292b6addc4180376006121ad5cf0e71e63be9a yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -470,7 +470,7 @@
                     mylog.debug("Reassigning '%s' to '%s'", key, field)
                 else:
                     field = key
-                sfh._additional_fields += field
+                sfh._additional_fields += (field,)
                 pdata[field] = data.pop(key)
     else :
         particle_types = {}
@@ -708,7 +708,7 @@
                        level = g.Level,
                        dimensions = g.ActiveDimensions )
             for field in pf.h.field_list:
-                if not pf.field_info[field].particle_type :
+                if not pf._get_field_info(field).particle_type :
                     gd[field] = g[field]
             grid_data.append(gd)
             if g.Level < pf.h.max_level: continue
@@ -836,7 +836,7 @@
         else:
             field = key
         pdata[field] = data[key]
-        sfh._additional_fields += field
+        sfh._additional_fields += (field,)
     data = pdata # Drop reference count
     particle_types = set_particle_types(data)
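
The distinction matters because += on a tuple concatenates element by
element: adding the 2-tuple field key directly splices in its two
strings, whereas wrapping it in a one-element tuple appends the key
whole.  Illustration:

    field = ("io", "particle_mass")

    wrong = ()
    wrong += field          # splices the elements
    assert wrong == ("io", "particle_mass")       # two string entries

    right = ()
    right += (field,)       # appends the field key as one entry
    assert right == (("io", "particle_mass"),)    # one tuple entry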
 


https://bitbucket.org/yt_analysis/yt/commits/8ed00de661e4/
Changeset:   8ed00de661e4
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-12-19 16:09:17
Summary:     More explicit about particle types here.
Affected #:  1 file

diff -r 66292b6addc4180376006121ad5cf0e71e63be9a -r 8ed00de661e4d9e745b6d21e22a1fd9bd99dde21 yt/frontends/stream/tests/test_stream_particles.py
--- a/yt/frontends/stream/tests/test_stream_particles.py
+++ b/yt/frontends/stream/tests/test_stream_particles.py
@@ -74,16 +74,18 @@
 
     # Check to make sure the fields have been defined correctly
     
-    assert ug1._get_field_info("all", "particle_position_x").particle_type
-    assert ug1._get_field_info("all", "particle_position_y").particle_type
-    assert ug1._get_field_info("all", "particle_position_z").particle_type
-    assert ug1._get_field_info("all", "particle_mass").particle_type
+    for ptype in ("all", "io"):
+        assert ug1._get_field_info(ptype, "particle_position_x").particle_type
+        assert ug1._get_field_info(ptype, "particle_position_y").particle_type
+        assert ug1._get_field_info(ptype, "particle_position_z").particle_type
+        assert ug1._get_field_info(ptype, "particle_mass").particle_type
     assert not ug1._get_field_info("gas", "Density").particle_type
 
-    assert ug2._get_field_info("all", "particle_position_x").particle_type
-    assert ug2._get_field_info("all", "particle_position_y").particle_type
-    assert ug2._get_field_info("all", "particle_position_z").particle_type
-    assert ug2._get_field_info("all", "particle_mass").particle_type
+    for ptype in ("all", "io"):
+        assert ug2._get_field_info(ptype, "particle_position_x").particle_type
+        assert ug2._get_field_info(ptype, "particle_position_y").particle_type
+        assert ug2._get_field_info(ptype, "particle_position_z").particle_type
+        assert ug2._get_field_info(ptype, "particle_mass").particle_type
     assert not ug2._get_field_info("gas", "Density").particle_type
     
     # Now refine this


https://bitbucket.org/yt_analysis/yt/commits/1a3764bbb7b1/
Changeset:   1a3764bbb7b1
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-12-19 16:31:57
Summary:     Changes to setup per particle type in stream.
Affected #:  2 files

diff -r 8ed00de661e4d9e745b6d21e22a1fd9bd99dde21 -r 1a3764bbb7b1319d569adf1f08728dcc4952e7e6 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -66,7 +66,8 @@
 from .fields import \
     StreamFieldInfo, \
     add_stream_field, \
-    KnownStreamFields
+    KnownStreamFields, \
+    _setup_particle_fields
 
 class StreamGrid(AMRGridPatch):
     """
@@ -325,13 +326,7 @@
 
     def _setup_particle_type(self, ptype):
         orig = set(self.field_info.items())
-        particle_vector_functions(ptype,
-            ["particle_position_%s" % ax for ax in 'xyz'],
-            ["particle_velocity_%s" % ax for ax in 'xyz'],
-            self.field_info)
-        particle_deposition_functions(ptype,
-            "Coordinates", "particle_mass", self.field_info)
-        standard_particle_fields(self.field_info, ptype)
+        _setup_particle_fields(self.field_info, ptype)
         return [n for n, v in set(self.field_info.items()).difference(orig)]
 
 class StreamDictFieldHandler(dict):
@@ -343,6 +338,19 @@
         fields = list(set(fields))
         return fields
 
+def update_field_names(data):
+    orig_names = data.keys()
+    for k in orig_names:
+        if isinstance(k, tuple): continue
+        s = getattr(data[k], "shape", ())
+        if len(s) == 1:
+            field = ("io", k)
+        elif len(s) == 3:
+            field = ("gas", k)
+        else:
+            raise NotImplementedError
+        data[field] = data.pop(k)
+
 def set_particle_types(data) :
 
     particle_types = {}
@@ -613,6 +621,7 @@
         grid_levels[i,:] = g.pop("level")
         if g.has_key("number_of_particles") :
             number_of_particles[i,:] = g.pop("number_of_particles")  
+        update_field_names(g)
         sfh[i] = g
             
     handler = StreamHandler(
@@ -685,7 +694,10 @@
     if number_of_particles > 0 :
         pdata = {}
         for field in base_pf.h.field_list :
-            if base_pf.field_info[field].particle_type :
+            if not isinstance(field, tuple):
+                field = ("unknown", field)
+            fi = base_pf._get_field_info(*field)
+            if fi.particle_type :
                 pdata[field] = np.concatenate([grid[field]
                                                for grid in base_pf.h.grids])
         pdata["number_of_particles"] = number_of_particles
@@ -708,7 +720,10 @@
                        level = g.Level,
                        dimensions = g.ActiveDimensions )
             for field in pf.h.field_list:
-                if not pf._get_field_info(field).particle_type :
+                if not isinstance(field, tuple):
+                    field = ("unknown", field)
+                fi = pf._get_field_info(*field)
+                if fi.particle_type :
                     gd[field] = g[field]
             grid_data.append(gd)
             if g.Level < pf.h.max_level: continue
@@ -721,7 +736,10 @@
                 gd = dict(left_edge = LE, right_edge = grid.right_edge,
                           level = g.Level + 1, dimensions = dims)
                 for field in pf.h.field_list:
-                    if not pf.field_info[field].particle_type :
+                    if not isinstance(field, tuple):
+                        field = ("unknown", field)
+                    fi = pf._get_field_info(*field)
+                    if fi.particle_type :
                         gd[field] = grid[field]
                 grid_data.append(gd)
         
@@ -838,6 +856,7 @@
         pdata[field] = data[key]
         sfh._additional_fields += (field,)
     data = pdata # Drop reference count
+    update_field_names(data)
     particle_types = set_particle_types(data)
 
     sfh.update({'stream_file':data})
@@ -1024,6 +1043,7 @@
     domain_left_edge = np.array(bbox[:, 0], 'float64')
     domain_right_edge = np.array(bbox[:, 1], 'float64')
     grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
+    update_field_names(data)
 
     sfh = StreamDictFieldHandler()
     

diff -r 8ed00de661e4d9e745b6d21e22a1fd9bd99dde21 -r 1a3764bbb7b1319d569adf1f08728dcc4952e7e6 yt/frontends/stream/fields.py
--- a/yt/frontends/stream/fields.py
+++ b/yt/frontends/stream/fields.py
@@ -41,30 +41,14 @@
 
 add_field("Density", function = TranslationFunc("density"))
 
-add_stream_field("particle_position_x", function = NullFunc, particle_type=True)
-add_stream_field("particle_position_y", function = NullFunc, particle_type=True)
-add_stream_field("particle_position_z", function = NullFunc, particle_type=True)
-add_stream_field("particle_index", function = NullFunc, particle_type=True)
-add_stream_field("particle_gas_density", function = NullFunc, particle_type=True)
-add_stream_field("particle_gas_temperature", function = NullFunc, particle_type=True)
-add_stream_field("particle_mass", function = NullFunc, particle_type=True)
-
-add_field("ParticleMass", function = TranslationFunc("particle_mass"),
-          particle_type=True)
-
-add_stream_field(("all", "particle_position_x"), function = NullFunc, particle_type=True)
-add_stream_field(("all", "particle_position_y"), function = NullFunc, particle_type=True)
-add_stream_field(("all", "particle_position_z"), function = NullFunc, particle_type=True)
-add_stream_field(("all", "particle_index"), function = NullFunc, particle_type=True)
-add_stream_field(("all", "particle_gas_density"), function = NullFunc, particle_type=True)
-add_stream_field(("all", "particle_gas_temperature"), function = NullFunc, particle_type=True)
-add_stream_field(("all", "particle_mass"), function = NullFunc, particle_type=True)
-
-add_field(("all", "ParticleMass"), function = TranslationFunc("particle_mass"),
-          particle_type=True)
-
-particle_vector_functions("all", ["particle_position_%s" % ax for ax in 'xyz'],
-                                 ["particle_velocity_%s" % ax for ax in 'xyz'],
-                          StreamFieldInfo)
-particle_deposition_functions("all", "Coordinates", "ParticleMass",
-                               StreamFieldInfo)
+def _setup_particle_fields(registry, ptype):
+    particle_vector_functions(ptype,
+        ["particle_position_%s" % ax for ax in 'xyz'],
+        ["particle_velocity_%s" % ax for ax in 'xyz'],
+        registry)
+    particle_deposition_functions(ptype,
+        "Coordinates", "particle_mass", registry)
+    for fn in ["creation_time", "dynamical_time", "metallicity_fraction"] + \
+              ["particle_type", "particle_index", "ParticleMass"] + \
+              ["particle_position_%s" % ax for ax in 'xyz']:
+        registry.add_field((ptype, fn), function=NullFunc, particle_type=True)
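
update_field_names (introduced above in data_structures.py) routes bare
keys by array rank: 1-D data becomes a particle field under "io", 3-D
data a fluid field under "gas".  A standalone sketch of that dispatch
(a later changeset in this series additionally skips scalar entries):

    import numpy as np

    def update_field_names(data):
        for k in list(data):
            if isinstance(k, tuple):
                continue
            s = getattr(data[k], "shape", ())
            if len(s) == 1:
                field = ("io", k)       # particle data
            elif len(s) == 3:
                field = ("gas", k)      # fluid data
            else:
                raise NotImplementedError
            data[field] = data.pop(k)

    d = {"Density": np.ones((4, 4, 4)), "particle_mass": np.ones(16)}
    update_field_names(d)
    assert ("gas", "Density") in d
    assert ("io", "particle_mass") in d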


https://bitbucket.org/yt_analysis/yt/commits/32c0504f26cf/
Changeset:   32c0504f26cf
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-12-19 16:39:46
Summary:     Adding more tests, attempting to fix streaming tests.
Affected #:  6 files

diff -r 1a3764bbb7b1319d569adf1f08728dcc4952e7e6 -r 32c0504f26cf7929357075d6971a42c95b1dd51b yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -482,6 +482,7 @@
                 pdata[field] = data.pop(key)
     else :
         particle_types = {}
+    update_field_names(data)
     
     if nprocs > 1:
         temp = {}

diff -r 1a3764bbb7b1319d569adf1f08728dcc4952e7e6 -r 32c0504f26cf7929357075d6971a42c95b1dd51b yt/frontends/stream/fields.py
--- a/yt/frontends/stream/fields.py
+++ b/yt/frontends/stream/fields.py
@@ -34,13 +34,11 @@
 StreamFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
 add_field = StreamFieldInfo.add_field
 
-add_stream_field("density", function = NullFunc)
+add_stream_field("Density", function = NullFunc)
 add_stream_field("x-velocity", function = NullFunc)
 add_stream_field("y-velocity", function = NullFunc)
 add_stream_field("z-velocity", function = NullFunc)
 
-add_field("Density", function = TranslationFunc("density"))
-
 def _setup_particle_fields(registry, ptype):
     particle_vector_functions(ptype,
         ["particle_position_%s" % ax for ax in 'xyz'],

diff -r 1a3764bbb7b1319d569adf1f08728dcc4952e7e6 -r 32c0504f26cf7929357075d6971a42c95b1dd51b yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -62,7 +62,7 @@
             ind = 0
             for chunk in chunks:
                 for g in chunk.objs:
-                    ds = self.fields[g.id][fname]
+                    ds = self.fields[g.id][field]
                     ind += g.select(selector, ds, rv[field], ind) # caches
         return rv
 

diff -r 1a3764bbb7b1319d569adf1f08728dcc4952e7e6 -r 32c0504f26cf7929357075d6971a42c95b1dd51b yt/frontends/stream/tests/test_stream_particles.py
--- a/yt/frontends/stream/tests/test_stream_particles.py
+++ b/yt/frontends/stream/tests/test_stream_particles.py
@@ -28,7 +28,6 @@
     
     # Check that all of this runs ok without particles
     
-    ug0 = load_uniform_grid({"Density": dens}, domain_dims, 1.0)
     ug0 = load_uniform_grid({"Density": dens}, domain_dims, 1.0, nprocs=8)
     amr0 = refine_amr(ug0, rc, fo, 3)
 

diff -r 1a3764bbb7b1319d569adf1f08728dcc4952e7e6 -r 32c0504f26cf7929357075d6971a42c95b1dd51b yt/geometry/tests/test_particle_octree.py
--- a/yt/geometry/tests/test_particle_octree.py
+++ b/yt/geometry/tests/test_particle_octree.py
@@ -91,6 +91,8 @@
         bi = dd["io","mesh_id"]
         v = np.bincount(bi.astype("int64"))
         yield assert_equal, v.max() <= n_ref, True
+        bi2 = dd["all","mesh_id"]
+        yield assert_equal, bi, bi2
 
 def test_particle_overrefine():
     np.random.seed(int(0x4d3d3d3))

diff -r 1a3764bbb7b1319d569adf1f08728dcc4952e7e6 -r 32c0504f26cf7929357075d6971a42c95b1dd51b yt/utilities/initial_conditions.py
--- a/yt/utilities/initial_conditions.py
+++ b/yt/utilities/initial_conditions.py
@@ -34,7 +34,7 @@
         if sub_select is not None:
             ind &= sub_select
         for field, val in self.fields.iteritems():
-            grid[field][ind] = val
+            grid.field_data[field][ind] = val
 
 class CoredSphere(FluidOperator):
     def __init__(self, core_radius, radius, center, fields):


https://bitbucket.org/yt_analysis/yt/commits/6d8ceaa3e7e1/
Changeset:   6d8ceaa3e7e1
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-12-19 16:56:20
Summary:     Fix a few minor logical mistakes.
Affected #:  2 files

diff -r 32c0504f26cf7929357075d6971a42c95b1dd51b -r 6d8ceaa3e7e1ff23b9be22eba667c43cbf045c25 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -724,7 +724,7 @@
                 if not isinstance(field, tuple):
                     field = ("unknown", field)
                 fi = pf._get_field_info(*field)
-                if fi.particle_type :
+                if not fi.particle_type :
                     gd[field] = g[field]
             grid_data.append(gd)
             if g.Level < pf.h.max_level: continue
@@ -740,7 +740,7 @@
                     if not isinstance(field, tuple):
                         field = ("unknown", field)
                     fi = pf._get_field_info(*field)
-                    if fi.particle_type :
+                    if not fi.particle_type :
                         gd[field] = grid[field]
                 grid_data.append(gd)
         

diff -r 32c0504f26cf7929357075d6971a42c95b1dd51b -r 6d8ceaa3e7e1ff23b9be22eba667c43cbf045c25 yt/utilities/initial_conditions.py
--- a/yt/utilities/initial_conditions.py
+++ b/yt/utilities/initial_conditions.py
@@ -34,7 +34,7 @@
         if sub_select is not None:
             ind &= sub_select
         for field, val in self.fields.iteritems():
-            grid.field_data[field][ind] = val
+            grid[field][ind] = val
 
 class CoredSphere(FluidOperator):
     def __init__(self, core_radius, radius, center, fields):


https://bitbucket.org/yt_analysis/yt/commits/d5d1df0de611/
Changeset:   d5d1df0de611
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-12-19 17:02:10
Summary:     Fixing the refine_amr tests.
Affected #:  2 files

diff -r 6d8ceaa3e7e1ff23b9be22eba667c43cbf045c25 -r d5d1df0de61155dedf86832c118eb6a3d8e4bab3 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -247,6 +247,7 @@
         alone. 
         """
         
+        [update_field_names(d) for d in data]
         particle_types = set_particle_types(data[0])
         ftype = "io"
 
@@ -347,6 +348,8 @@
             field = ("io", k)
         elif len(s) == 3:
             field = ("gas", k)
+        elif len(s) == 0:
+            continue
         else:
             raise NotImplementedError
         data[field] = data.pop(k)

diff -r 6d8ceaa3e7e1ff23b9be22eba667c43cbf045c25 -r d5d1df0de61155dedf86832c118eb6a3d8e4bab3 yt/frontends/stream/fields.py
--- a/yt/frontends/stream/fields.py
+++ b/yt/frontends/stream/fields.py
@@ -48,5 +48,6 @@
         "Coordinates", "particle_mass", registry)
     for fn in ["creation_time", "dynamical_time", "metallicity_fraction"] + \
               ["particle_type", "particle_index", "ParticleMass"] + \
+              ["particle_mass"] + \
               ["particle_position_%s" % ax for ax in 'xyz']:
         registry.add_field((ptype, fn), function=NullFunc, particle_type=True)


https://bitbucket.org/yt_analysis/yt/commits/2eba8442af3f/
Changeset:   2eba8442af3f
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-12-19 17:19:36
Summary:     Set up Stream field list correctly.
Affected #:  3 files

diff -r d5d1df0de61155dedf86832c118eb6a3d8e4bab3 -r 2eba8442af3fcbd40c3777d1194fe84397b6fe33 yt/data_objects/tests/test_cutting_plane.py
--- a/yt/data_objects/tests/test_cutting_plane.py
+++ b/yt/data_objects/tests/test_cutting_plane.py
@@ -32,6 +32,7 @@
         fns += pw.save(name=tmpname)
         frb = cut.to_frb((1.0,'unitary'), 64)
         for cut_field in ['Ones', 'Density']:
+            fi = pf._get_field_info("unknown", cut_field)
             yield assert_equal, frb[cut_field].info['data_source'], \
                 cut.__str__()
             yield assert_equal, frb[cut_field].info['axis'], \
@@ -39,7 +40,7 @@
             yield assert_equal, frb[cut_field].info['field'], \
                 cut_field
             yield assert_equal, frb[cut_field].info['units'], \
-                pf.field_info[cut_field].get_units()
+                fi.get_units()
             yield assert_equal, frb[cut_field].info['xlim'], \
                 frb.bounds[:2]
             yield assert_equal, frb[cut_field].info['ylim'], \

diff -r d5d1df0de61155dedf86832c118eb6a3d8e4bab3 -r 2eba8442af3fcbd40c3777d1194fe84397b6fe33 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -225,7 +225,12 @@
         GridGeometryHandler._setup_classes(self, dd)
 
     def _detect_fields(self):
-        self.field_list = list(set(self.stream_handler.get_fields()))
+        # NOTE: Because particle unions add to the actual field list, without
+        # having the keys in the field list itself, we need to double check
+        # here.
+        fl = set(self.stream_handler.get_fields())
+        fl.update(set(getattr(self, "field_list", [])))
+        self.field_list = list(fl)
 
     def _populate_grid_objects(self):
         for g in self.grids:

diff -r d5d1df0de61155dedf86832c118eb6a3d8e4bab3 -r 2eba8442af3fcbd40c3777d1194fe84397b6fe33 yt/frontends/stream/fields.py
--- a/yt/frontends/stream/fields.py
+++ b/yt/frontends/stream/fields.py
@@ -40,14 +40,14 @@
 add_stream_field("z-velocity", function = NullFunc)
 
 def _setup_particle_fields(registry, ptype):
+    for fn in ["creation_time", "dynamical_time", "metallicity_fraction"] + \
+              ["particle_type", "particle_index", "particle_mass"] + \
+              ["particle_position_%s" % ax for ax in 'xyz'] + \
+              ["particle_velocity_%s" % ax for ax in 'xyz']:
+        registry.add_field((ptype, fn), function=NullFunc, particle_type=True)
     particle_vector_functions(ptype,
         ["particle_position_%s" % ax for ax in 'xyz'],
         ["particle_velocity_%s" % ax for ax in 'xyz'],
         registry)
     particle_deposition_functions(ptype,
         "Coordinates", "particle_mass", registry)
-    for fn in ["creation_time", "dynamical_time", "metallicity_fraction"] + \
-              ["particle_type", "particle_index", "ParticleMass"] + \
-              ["particle_mass"] + \
-              ["particle_position_%s" % ax for ax in 'xyz']:
-        registry.add_field((ptype, fn), function=NullFunc, particle_type=True)


https://bitbucket.org/yt_analysis/yt/commits/1445ee994bde/
Changeset:   1445ee994bde
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-12-19 17:35:47
Summary:     Utilize field tuples more uniformly.
Affected #:  4 files

diff -r 2eba8442af3fcbd40c3777d1194fe84397b6fe33 -r 1445ee994bdeb47cf262d7f0ad3afecfc59a5283 yt/data_objects/tests/test_projection.py
--- a/yt/data_objects/tests/test_projection.py
+++ b/yt/data_objects/tests/test_projection.py
@@ -46,6 +46,7 @@
                 fns += pw.save(name=tmpname)
                 frb = proj.to_frb((1.0,'unitary'), 64)
                 for proj_field in ['Ones', 'Density']:
+                    fi = pf._get_field_info(proj_field)
                     yield assert_equal, frb[proj_field].info['data_source'], \
                             proj.__str__()
                     yield assert_equal, frb[proj_field].info['axis'], \
@@ -53,7 +54,7 @@
                     yield assert_equal, frb[proj_field].info['field'], \
                             proj_field
                     yield assert_equal, frb[proj_field].info['units'], \
-                            pf.field_info[proj_field].get_units()
+                            fi.get_units()
                     yield assert_equal, frb[proj_field].info['xlim'], \
                             frb.bounds[:2]
                     yield assert_equal, frb[proj_field].info['ylim'], \

diff -r 2eba8442af3fcbd40c3777d1194fe84397b6fe33 -r 1445ee994bdeb47cf262d7f0ad3afecfc59a5283 yt/data_objects/tests/test_slice.py
--- a/yt/data_objects/tests/test_slice.py
+++ b/yt/data_objects/tests/test_slice.py
@@ -67,6 +67,7 @@
                 fns += pw.save(name=tmpname)
                 frb = slc.to_frb((1.0, 'unitary'), 64)
                 for slc_field in ['Ones', 'Density']:
+                    fi = pf._get_field_info(slc_field)
                     yield assert_equal, frb[slc_field].info['data_source'], \
                         slc.__str__()
                     yield assert_equal, frb[slc_field].info['axis'], \
@@ -74,7 +75,7 @@
                     yield assert_equal, frb[slc_field].info['field'], \
                         slc_field
                     yield assert_equal, frb[slc_field].info['units'], \
-                        pf.field_info[slc_field].get_units()
+                        fi.get_units()
                     yield assert_equal, frb[slc_field].info['xlim'], \
                         frb.bounds[:2]
                     yield assert_equal, frb[slc_field].info['ylim'], \

diff -r 2eba8442af3fcbd40c3777d1194fe84397b6fe33 -r 1445ee994bdeb47cf262d7f0ad3afecfc59a5283 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -1000,7 +1000,13 @@
         super(StreamOctreeHandler, self)._setup_classes(dd)
 
     def _detect_fields(self):
-        self.field_list = list(set(self.stream_handler.get_fields()))
+        # NOTE: Because particle unions add to the actual field list, without
+        # having the keys in the field list itself, we need to double check
+        # here.
+        fl = set(self.stream_handler.get_fields())
+        fl.update(set(getattr(self, "field_list", [])))
+        self.field_list = list(fl)
+
 
 class StreamOctreeStaticOutput(StreamStaticOutput):
     _hierarchy_class = StreamOctreeHandler
@@ -1141,7 +1147,13 @@
             self.io = io_registry[self.data_style](self.pf)
 
     def _detect_fields(self):
-        self.field_list = list(set(self.stream_handler.get_fields()))
+        # NOTE: Because particle unions add to the actual field list, without
+        # having the keys in the field list itself, we need to double check
+        # here.
+        fl = set(self.stream_handler.get_fields())
+        fl.update(set(getattr(self, "field_list", [])))
+        self.field_list = list(fl)
+
 
 class StreamHexahedralStaticOutput(StreamStaticOutput):
     _hierarchy_class = StreamHexahedralHierarchy

diff -r 2eba8442af3fcbd40c3777d1194fe84397b6fe33 -r 1445ee994bdeb47cf262d7f0ad3afecfc59a5283 yt/utilities/grid_data_format/writer.py
--- a/yt/utilities/grid_data_format/writer.py
+++ b/yt/utilities/grid_data_format/writer.py
@@ -59,7 +59,9 @@
         A dictionary of field parameters to set.
     """
 
-    field_obj = pf.field_info[field_name]
+    if isinstance(field_name, tuple):
+        field_name = field_name[0]
+    field_obj = pf._get_field_info(field_name)
     if field_obj.particle_type:
         print( "Saving particle fields currently not supported." )
         return
@@ -84,6 +86,9 @@
     # add field info to field_types group
     g = fhandle["field_types"]
     # create the subgroup with the field's name
+    if isinstance(field_name, tuple):
+        field_name = field_name[0]
+    fi = pf._get_field_info(field_name)
     try:
         sg = g.create_group(field_name)
     except ValueError:
@@ -91,8 +96,8 @@
         sys.exit(1)
         
     # grab the display name and units from the field info container.
-    display_name = pf.field_info[field_name].display_name
-    units = pf.field_info[field_name].get_units()
+    display_name = fi.display_name
+    units = fi.get_units()
 
     # check that they actually contain something...
     if display_name:
@@ -122,9 +127,8 @@
         pt_group = particles_group[particle_type_name]
         # add the field data to the grid group
         # Check if this is a real field or particle data.
-        field_obj = pf.field_info[field_name]
         grid.get_data(field_name)
-        if field_obj.particle_type:  # particle data
+        if fi.particle_type:  # particle data
             pt_group[field_name] = grid[field_name]
         else:  # a field
             grid_group[field_name] = grid[field_name]


https://bitbucket.org/yt_analysis/yt/commits/23a808960cf0/
Changeset:   23a808960cf0
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-12-19 17:46:56
Summary:     Remove deposit special case for Stream.
Affected #:  1 file

diff -r 1445ee994bdeb47cf262d7f0ad3afecfc59a5283 -r 23a808960cf0d1a96a05b5fdb296fd97d7ea045f yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -46,7 +46,7 @@
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
         chunks = list(chunks)
-        if any((ftype not in ("gas", "deposit") for ftype, fname in fields)):
+        if any((ftype not in ("gas",) for ftype, fname in fields)):
             raise NotImplementedError
         rv = {}
         for field in fields:
@@ -57,8 +57,6 @@
                     size, [f2 for f1, f2 in fields], ng)
         for field in fields:
             ftype, fname = field
-            if ftype == 'deposit':
-                fname = field
             ind = 0
             for chunk in chunks:
                 for g in chunk.objs:


https://bitbucket.org/yt_analysis/yt/commits/b74c0e2cdd29/
Changeset:   b74c0e2cdd29
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-12-19 17:59:13
Summary:     Attempt to update our field detection to be more like unitrefactor's.
Affected #:  1 file

diff -r 23a808960cf0d1a96a05b5fdb296fd97d7ea045f -r b74c0e2cdd298306a6630a9d27d1a5fa87faf7ee yt/fields/tests/test_fields.py
--- a/yt/fields/tests/test_fields.py
+++ b/yt/fields/tests/test_fields.py
@@ -21,17 +21,24 @@
     cp_z_vec = np.array((0.0, 0.0, 1.0)),
 )
 
-_base_fields = ["Density", "x-velocity", "y-velocity", "z-velocity"]
+_base_fields = (("gas", "Density"),
+                ("gas", "x-velocity"),
+                ("gas", "y-velocity"),
+                ("gas", "z-velocity"))
+_base_field_names = [f[1] for f in _base_fields]
 
 def realistic_pf(fields, nprocs):
     np.random.seed(int(0x4d3d3d3))
-    pf = fake_random_pf(16, fields = fields, nprocs = nprocs)
+    fields = list(set([_strip_ftype(f) for f in fields]))
+    pf = fake_random_pf(16, fields = fields, nprocs = nprocs,
+                        particles = 16**3)
     pf.parameters["HydroMethod"] = "streaming"
     pf.parameters["Gamma"] = 5.0/3.0
     pf.parameters["EOSType"] = 1.0
     pf.parameters["EOSSoundSpeed"] = 1.0
     pf.conversion_factors["Time"] = 1.0
     pf.conversion_factors.update( dict((f, 1.0) for f in fields) )
+    pf.gamma = 5.0/3.0
     pf.current_redshift = 0.0001
     pf.hubble_constant = 0.7
     pf.omega_matter = 0.27
@@ -41,6 +48,20 @@
         pf.units[unit+'hcm'] = pf.units[unit]
     return pf
 
+def _strip_ftype(field):
+    if not isinstance(field, tuple):
+        return field
+    elif field[0] == "all":
+        return field
+    return field[1]
+
+def _expand_field(field):
+    if isinstance(field, tuple):
+        return field
+    if "particle" in field:
+        return ("all", field)
+    return ("gas", field)
+
 class TestFieldAccess(object):
     description = None
 
@@ -52,8 +73,13 @@
 
     def __call__(self):
         field = FieldInfo[self.field_name]
+        # Don't test the base fields
+        if field in _base_fields or field in _base_field_names: return
         deps = field.get_dependencies()
-        fields = list(set(deps.requested + _base_fields))
+        fields = set([])
+        for f in deps.requested + list(_base_fields):
+            fields.add(_expand_field(f))
+        fields = list(fields)
         skip_grids = False
         needs_spatial = False
         for v in field.validators:
@@ -85,7 +111,7 @@
                 assert_array_almost_equal_nulp(v1, conv*field._function(field, g), 4)
 
 def test_all_fields():
-    for field in FieldInfo:
+    for field in sorted(FieldInfo):
         if isinstance(field, types.TupleType):
             fname = field[0]
         else:
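
The _strip_ftype/_expand_field helpers added above convert between bare field names and (ftype, fname) tuples; particle fields are promoted to the "all" type, everything else to "gas". Pulled out as a standalone round trip (same logic as the diff):

    def _strip_ftype(field):
        # Drop the field type, except for the special "all" union.
        if not isinstance(field, tuple):
            return field
        elif field[0] == "all":
            return field
        return field[1]

    def _expand_field(field):
        # Promote a bare name to a tuple; particle fields go to "all".
        if isinstance(field, tuple):
            return field
        if "particle" in field:
            return ("all", field)
        return ("gas", field)

    assert _expand_field("Density") == ("gas", "Density")
    assert _expand_field("particle_mass") == ("all", "particle_mass")
    assert _strip_ftype(("gas", "Density")) == "Density"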


https://bitbucket.org/yt_analysis/yt/commits/f68dd2bd922b/
Changeset:   f68dd2bd922b
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-12-19 18:18:58
Summary:     Add backup field detection.  We should have done this a long time ago.
Affected #:  2 files

diff -r b74c0e2cdd298306a6630a9d27d1a5fa87faf7ee -r f68dd2bd922b48c0117bb5a6ca497bdb973c28fc yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -503,7 +503,16 @@
                 continue
             fd = self.pf.field_dependencies.get(field, None) or \
                  self.pf.field_dependencies.get(field[1], None)
-            if fd is None: continue
+            # This is long overdue.  Any time we *can't* find a field
+            # dependency -- for instance, if the derived field has been added
+            # after parameter file instantiation -- let's just try to
+            # recalculate it.
+            if fd is None:
+                try:
+                    fd = fi.get_dependencies(pf = self.pf)
+                    self.pf.field_dependencies[field] = fd
+                except:
+                    continue
             requested = self._determine_fields(list(set(fd.requested)))
             deps = [d for d in requested if d not in fields_to_get]
             fields_to_get += deps

diff -r b74c0e2cdd298306a6630a9d27d1a5fa87faf7ee -r f68dd2bd922b48c0117bb5a6ca497bdb973c28fc yt/fields/tests/test_fields.py
--- a/yt/fields/tests/test_fields.py
+++ b/yt/fields/tests/test_fields.py
@@ -31,7 +31,7 @@
     np.random.seed(int(0x4d3d3d3))
     fields = list(set([_strip_ftype(f) for f in fields]))
     pf = fake_random_pf(16, fields = fields, nprocs = nprocs,
-                        particles = 16**3)
+                        particles = 4**3)
     pf.parameters["HydroMethod"] = "streaming"
     pf.parameters["Gamma"] = 5.0/3.0
     pf.parameters["EOSType"] = 1.0
@@ -119,13 +119,16 @@
         if fname.startswith("CuttingPlane"): continue
         if fname.startswith("particle"): continue
         if fname.startswith("CIC"): continue
-        if field.startswith("BetaPar"): continue
-        if field.startswith("TBetaPar"): continue
-        if field.startswith("BetaPerp"): continue
+        if fname.startswith("BetaPar"): continue
+        if fname.startswith("TBetaPar"): continue
+        if fname.startswith("BetaPerp"): continue
         if fname.startswith("WeakLensingConvergence"): continue
         if fname.startswith("DensityPerturbation"): continue
         if fname.startswith("Matter_Density"): continue
         if fname.startswith("Overdensity"): continue
+        # TotalMass is disabled because of issues with mixed particle/fluid
+        # field detection in current field system.
+        if fname.startswith("TotalMass"): continue
         if FieldInfo[field].particle_type: continue
         for nproc in [1, 4, 8]:
             test_all_fields.__name__ = "%s_%s" % (field, nproc)
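
The data_containers.py change is a cache-miss fallback: when a field's dependencies are missing from pf.field_dependencies (for instance because the derived field was registered after the parameter file was instantiated), they are recomputed and memoized. A generic sketch of the pattern (lookup_dependencies is an illustrative name):

    def lookup_dependencies(cache, field, recompute):
        # Return cached dependencies, recomputing and memoizing on a
        # miss; fields that cannot be recomputed yield None (skipped).
        fd = cache.get(field)
        if fd is None:
            try:
                fd = recompute(field)
                cache[field] = fd
            except Exception:
                return None
        return fd

    cache = {}
    deps = lookup_dependencies(cache, "Entropy", lambda f: ["Density"])
    assert deps == ["Density"] and cache["Entropy"] == deps  # memoized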


https://bitbucket.org/yt_analysis/yt/commits/ef8ae8d97fd8/
Changeset:   ef8ae8d97fd8
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-12-19 18:56:53
Summary:     We want the second component of the tuple.
Affected #:  1 file

diff -r f68dd2bd922b48c0117bb5a6ca497bdb973c28fc -r ef8ae8d97fd879936c548bf9258655156038905d yt/utilities/grid_data_format/writer.py
--- a/yt/utilities/grid_data_format/writer.py
+++ b/yt/utilities/grid_data_format/writer.py
@@ -60,7 +60,7 @@
     """
 
     if isinstance(field_name, tuple):
-        field_name = field_name[0]
+        field_name = field_name[1]
     field_obj = pf._get_field_info(field_name)
     if field_obj.particle_type:
         print( "Saving particle fields currently not supported." )
@@ -87,7 +87,7 @@
     g = fhandle["field_types"]
     # create the subgroup with the field's name
     if isinstance(field_name, tuple):
-        field_name = field_name[0]
+        field_name = field_name[1]
     fi = pf._get_field_info(field_name)
     try:
         sg = g.create_group(field_name)
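
The one-character fix matters because yt field keys are (ftype, fname) tuples: index 0 is the field type ("gas", "io", ...) while index 1 is the field's own name, which is what the GDF writer needs for lookups and group names. For example:

    field_name = ("gas", "Density")
    if isinstance(field_name, tuple):
        field_name = field_name[1]   # the name, not the type
    assert field_name == "Density"   # index 0 would have given "gas"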


https://bitbucket.org/yt_analysis/yt/commits/ea2613f3755d/
Changeset:   ea2613f3755d
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-12-19 18:57:14
Summary:     Fix ParticleGenerator to use particle type names.
Affected #:  3 files

diff -r ef8ae8d97fd879936c548bf9258655156038905d -r ea2613f3755d342920ff03b354fc60fcf6b57eb0 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -25,6 +25,8 @@
     YTFieldData, \
     YTDataContainer, \
     YTSelectionContainer
+from yt.data_objects.particle_unions import \
+    ParticleUnion
 from yt.data_objects.grid_patch import \
     AMRGridPatch
 from yt.geometry.geometry_handler import \
@@ -251,7 +253,6 @@
         already in the stream but not part of the data dict will be left
         alone. 
         """
-        
         [update_field_names(d) for d in data]
         particle_types = set_particle_types(data[0])
         ftype = "io"
@@ -259,9 +260,7 @@
         for key in data[0].keys() :
             if key is "number_of_particles": continue
             self.stream_handler.particle_types[key] = particle_types[key]
-            if key not in self.field_list:
-                self.field_list.append(key)
-                
+
         for i, grid in enumerate(self.grids) :
             if data[i].has_key("number_of_particles") :
                 grid.NumberOfParticles = data[i].pop("number_of_particles")
@@ -272,7 +271,13 @@
                     grid.field_data.pop( ("io", fname) )
                 self.stream_handler.fields[grid.id][fname] = data[i][fname]
             
+
+        # We only want to create a superset of fields here.
         self._detect_fields()
+        mylog.debug("Creating Particle Union 'all'")
+        pu = ParticleUnion("all", list(self.pf.particle_types_raw))
+        self.pf.add_particle_union(pu)
+        self.pf.particle_types = tuple(set(self.pf.particle_types))
         self._setup_unknown_fields()
                 
 class StreamStaticOutput(StaticOutput):

diff -r ef8ae8d97fd879936c548bf9258655156038905d -r ea2613f3755d342920ff03b354fc60fcf6b57eb0 yt/utilities/particle_generator.py
--- a/yt/utilities/particle_generator.py
+++ b/yt/utilities/particle_generator.py
@@ -5,9 +5,9 @@
 
 class ParticleGenerator(object) :
 
-    default_fields = ["particle_position_x",
-                      "particle_position_y",
-                      "particle_position_z"]
+    default_fields = [("io", "particle_position_x"),
+                      ("io", "particle_position_y"),
+                      ("io", "particle_position_z")]
     
     def __init__(self, pf, num_particles, field_list) :
         """
@@ -20,7 +20,7 @@
         self.pf = pf
         self.num_particles = num_particles
         self.field_list = field_list
-        self.field_list.append("particle_index")
+        self.field_list.append(("io", "particle_index"))
         
         try :
             self.posx_index = self.field_list.index(self.default_fields[0])
@@ -28,9 +28,8 @@
             self.posz_index = self.field_list.index(self.default_fields[2])
         except :
             raise KeyError("Field list must contain the following fields: " +
-                           "\'particle_position_x\', \'particle_position_y\'" +
-                           ", \'particle_position_z\' ")
-        self.index_index = self.field_list.index("particle_index")
+                           "\n".join(self.default_fields))
+        self.index_index = self.field_list.index(("io", "particle_index"))
         
         self.num_grids = self.pf.h.num_grids
         self.NumberOfParticles = np.zeros((self.num_grids), dtype='int64')
@@ -212,9 +211,9 @@
         """
 
         field_list = data.keys()
-        x = data.pop("particle_position_x")
-        y = data.pop("particle_position_y")
-        z = data.pop("particle_position_z")
+        x = data.pop(("io", "particle_position_x"))
+        y = data.pop(("io", "particle_position_y"))
+        z = data.pop(("io", "particle_position_z"))
 
         xcond = np.logical_or(x < pf.domain_left_edge[0],
                               x >= pf.domain_right_edge[0])

diff -r ef8ae8d97fd879936c548bf9258655156038905d -r ea2613f3755d342920ff03b354fc60fcf6b57eb0 yt/utilities/tests/test_particle_generator.py
--- a/yt/utilities/tests/test_particle_generator.py
+++ b/yt/utilities/tests/test_particle_generator.py
@@ -23,11 +23,13 @@
 
     # Now generate particles from density
 
-    field_list = ["particle_position_x","particle_position_y",
-                  "particle_position_z","particle_index",
-                  "particle_gas_density"]
+    field_list = [("io", "particle_position_x"),
+                  ("io", "particle_position_y"),
+                  ("io", "particle_position_z"),
+                  ("io", "particle_index"),
+                  ("io", "particle_gas_density")]
     num_particles = 1000000
-    field_dict = {"Density": "particle_gas_density"}
+    field_dict = {("gas", "Density"): ("io", "particle_gas_density")}
     sphere = pf.h.sphere(pf.domain_center, 0.45)
 
     particles1 = WithDensityParticleGenerator(pf, sphere, num_particles, field_list)
@@ -41,6 +43,8 @@
     particles_per_grid1 = [len(grid["particle_position_x"]) for grid in pf.h.grids]
     yield assert_equal, particles_per_grid1, particles1.NumberOfParticles
 
+    tags = np.concatenate([grid["particle_index"] for grid in pf.h.grids])
+    assert(np.unique(tags).size == num_particles)
     # Set up a lattice of particles
     pdims = np.array([64,64,64])
     def new_indices() :
@@ -48,31 +52,33 @@
         return np.arange((np.product(pdims)))+num_particles
     le = np.array([0.25,0.25,0.25])
     re = np.array([0.75,0.75,0.75])
-    new_field_list = field_list + ["particle_gas_temperature"]
-    new_field_dict = {"Density": "particle_gas_density",
-                      "Temperature": "particle_gas_temperature"}
+    new_field_list = field_list + [("io", "particle_gas_temperature")]
+    new_field_dict = {("gas", "Density"): ("io", "particle_gas_density"),
+                      ("gas", "Temperature"): ("io", "particle_gas_temperature")}
 
     particles2 = LatticeParticleGenerator(pf, pdims, le, re, new_field_list)
     particles2.assign_indices(function=new_indices)
     particles2.map_grid_fields_to_particles(new_field_dict)
 
     #Test lattice positions
-    xpos = np.unique(particles2["particle_position_x"])
-    ypos = np.unique(particles2["particle_position_y"])
-    zpos = np.unique(particles2["particle_position_z"])
+    xpos = np.unique(particles2["io", "particle_position_x"])
+    ypos = np.unique(particles2["io", "particle_position_y"])
+    zpos = np.unique(particles2["io", "particle_position_z"])
 
     xpred = np.linspace(le[0],re[0],num=pdims[0],endpoint=True)
     ypred = np.linspace(le[1],re[1],num=pdims[1],endpoint=True)
     zpred = np.linspace(le[2],re[2],num=pdims[2],endpoint=True)
 
-    yield assert_almost_equal, xpos, xpred
-    yield assert_almost_equal, ypos, ypred
-    yield assert_almost_equal, zpos, zpred
+    assert_almost_equal( xpos, xpred)
+    assert_almost_equal( ypos, ypred)
+    assert_almost_equal( zpos, zpred)
 
     #Test the number of particles again
     particles2.apply_to_stream()
     particles_per_grid2 = [grid.NumberOfParticles for grid in pf.h.grids]
     yield assert_equal, particles_per_grid2, particles1.NumberOfParticles+particles2.NumberOfParticles
+
+    [grid.field_data.clear() for grid in pf.h.grids]
     particles_per_grid2 = [len(grid["particle_position_x"]) for grid in pf.h.grids]
     yield assert_equal, particles_per_grid2, particles1.NumberOfParticles+particles2.NumberOfParticles
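
After this change every ParticleGenerator field is keyed by an ("io", name) tuple, and the required-positions check looks those tuples up directly. One caveat in the diff above: "\n".join(self.default_fields) would raise a TypeError on tuples, so a standalone version of the check has to stringify them first:

    default_fields = [("io", "particle_position_x"),
                      ("io", "particle_position_y"),
                      ("io", "particle_position_z")]
    field_list = default_fields + [("io", "particle_index")]
    try:
        posx_index = field_list.index(default_fields[0])
    except ValueError:
        raise KeyError("Field list must contain the following fields:\n" +
                       "\n".join(str(f) for f in default_fields))
    assert posx_index == 0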
 


https://bitbucket.org/yt_analysis/yt/commits/2f4e0fdd8730/
Changeset:   2f4e0fdd8730
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-12-21 20:44:05
Summary:     Update ProfilePlotter and PhasePlotter for tuple field specifications.
Affected #:  2 files

diff -r ea2613f3755d342920ff03b354fc60fcf6b57eb0 -r 2f4e0fdd8730a550db7eec287b207e23dd4447ad yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -978,11 +978,15 @@
         cls = Profile3D
     else:
         raise NotImplementedError
+    bin_fields = data_source._determine_fields(bin_fields)
+    fields = data_source._determine_fields(fields)
+    if weight_field is not None:
+        weight_field, = data_source._determine_fields([weight_field])
     if not iterable(n):
         n = [n] * len(bin_fields)
     if not iterable(accumulation):
         accumulation = [accumulation] * len(bin_fields)
-    logs = [data_source.pf.field_info[f].take_log for f in bin_fields]
+    logs = [data_source.pf._get_field_info(f).take_log for f in bin_fields]
     ex = [data_source.quantities["Extrema"](f, non_zero=l)[0] \
           for f, l in zip(bin_fields, logs)]
     args = [data_source]

diff -r ea2613f3755d342920ff03b354fc60fcf6b57eb0 -r 2f4e0fdd8730a550db7eec287b207e23dd4447ad yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -119,7 +119,7 @@
     
     Parameters
     ----------
-    data_source : AMR4DData Object
+    data_source : AMR3DData Object
         The data object to be profiled, such as all_data, region, or 
         sphere.
     x_field : str
@@ -246,13 +246,18 @@
             name = "%s.png" % prefix
         suffix = get_image_suffix(name)
         prefix = name[:name.rfind(suffix)]
+        xfn = self.profiles[0].x_field
+        if isinstance(xfn, types.TupleType):
+            xfn = xfn[1]
         if not suffix:
             suffix = ".png"
         canvas_cls = get_canvas(name)
         for uid, fig in iters:
+            if isinstance(uid, types.TupleType):
+                uid = uid[1]
             canvas = canvas_cls(fig)
             fn = "%s_1d-Profile_%s_%s%s" % \
-              (prefix, self.profiles[0].x_field, uid, suffix)
+              (prefix, xfn, uid, suffix)
             mylog.info("Saving %s", fn)
             canvas.print_figure(fn)
         return self
@@ -410,7 +415,8 @@
             
     def _get_field_log(self, field_y, profile):
         pf = profile.data_source.pf
-        yfi = pf.field_info[field_y]
+        yf, = profile.data_source._determine_fields([field_y])
+        yfi = pf._get_field_info(*yf)
         if self.x_log is None:
             x_log = profile.x_log
         else:
@@ -425,6 +431,7 @@
     def _get_field_label(self, field, field_info):
         units = field_info.get_units()
         field_name = field_info.display_name
+        if isinstance(field, tuple): field = field[1]
         if field_name is None:
             field_name = r'$\rm{'+field+r'}$'
         elif field_name.find('$') == -1:
@@ -438,8 +445,10 @@
     def _get_field_title(self, field_y, profile):
         pf = profile.data_source.pf
         field_x = profile.x_field
-        xfi = pf.field_info[field_x]
-        yfi = pf.field_info[field_y]
+        xf, yf = profile.data_source._determine_fields(
+            [field_x, field_y])
+        xfi = pf._get_field_info(*xf)
+        yfi = pf._get_field_info(*yf)
         x_title = self.x_title or self._get_field_label(field_x, xfi)
         y_title = self.y_title.get(field_y, None) or \
                     self._get_field_label(field_y, yfi)
@@ -554,9 +563,11 @@
         pf = profile.data_source.pf
         field_x = profile.x_field
         field_y = profile.y_field
-        xfi = pf.field_info[field_x]
-        yfi = pf.field_info[field_y]
-        zfi = pf.field_info[field_z]
+        xf, yf, zf = profile.data_source._determine_fields(
+            [field_x, field_y, field_z])
+        xfi = pf._get_field_info(*xf)
+        yfi = pf._get_field_info(*yf)
+        zfi = pf._get_field_info(*zf)
         x_title = self.x_title or self._get_field_label(field_x, xfi)
         y_title = self.y_title or self._get_field_label(field_y, yfi)
         z_title = self.z_title.get(field_z, None) or \
@@ -566,6 +577,7 @@
     def _get_field_label(self, field, field_info):
         units = field_info.get_units()
         field_name = field_info.display_name
+        if isinstance(field, tuple): field = field[1]
         if field_name is None:
             field_name = r'$\rm{'+field+r'}$'
         elif field_name.find('$') == -1:
@@ -578,7 +590,8 @@
         
     def _get_field_log(self, field_z, profile):
         pf = profile.data_source.pf
-        zfi = pf.field_info[field_z]
+        zf, = profile.data_source._determine_fields([field_z])
+        zfi = pf._get_field_info(*zf)
         if self.x_log is None:
             x_log = profile.x_log
         else:
@@ -659,9 +672,16 @@
 
         if not self._plot_valid: self._setup_plots()
         if mpl_kwargs is None: mpl_kwargs = {}
+        xfn = self.profile.x_field
+        yfn = self.profile.y_field
+        if isinstance(xfn, types.TupleType):
+            xfn = xfn[1]
+        if isinstance(yfn, types.TupleType):
+            yfn = yfn[1]
         for f in self.profile.field_data:
-            middle = "2d-Profile_%s_%s_%s" % (self.profile.x_field, 
-                                              self.profile.y_field, f)
+            _f = f
+            if isinstance(f, types.TupleType): _f = _f[1]
+            middle = "2d-Profile_%s_%s_%s" % (xfn, yfn, _f)
             if name is None:
                 prefix = self.profile.pf
                 name = "%s.png" % prefix
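
The save-path hunks above all reduce tuple field keys to their name component before building filenames (types.TupleType is simply an alias for tuple in Python 2, and is gone in Python 3). A compact sketch of that flattening:

    def flatten(f):
        # Keep only the field name when given an (ftype, fname) tuple.
        return f[1] if isinstance(f, tuple) else f

    xfn, yfn, zfn = ("gas", "Density"), ("gas", "Temperature"), "CellMass"
    middle = "2d-Profile_%s_%s_%s" % (flatten(xfn), flatten(yfn),
                                      flatten(zfn))
    assert middle == "2d-Profile_Density_Temperature_CellMass"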


https://bitbucket.org/yt_analysis/yt/commits/bc9faeefee77/
Changeset:   bc9faeefee77
Branch:      yt-3.0
User:        jzuhone
Date:        2013-12-31 23:59:16
Summary:     Merged in MatthewTurk/yt/yt-3.0 (pull request #679)

Fixing ProfileND and tests in yt-3.0 branch
Affected #:  20 files

diff -r 8249a2890da54d3677a3c472598f59f282415a64 -r bc9faeefee77acc1e0a042b20da35b3bf26b4d82 yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -32,7 +32,10 @@
     BinnedProfile1D, \
     BinnedProfile2D, \
     BinnedProfile3D, \
-    create_profile
+    create_profile, \
+    Profile1D, \
+    Profile2D, \
+    Profile3D
 
 from time_series import \
     TimeSeriesData, \

diff -r 8249a2890da54d3677a3c472598f59f282415a64 -r bc9faeefee77acc1e0a042b20da35b3bf26b4d82 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -503,7 +503,16 @@
                 continue
             fd = self.pf.field_dependencies.get(field, None) or \
                  self.pf.field_dependencies.get(field[1], None)
-            if fd is None: continue
+            # This is long overdue.  Any time we *can't* find a field
+            # dependency -- for instance, if the derived field has been added
+            # after parameter file instantiation -- let's just try to
+            # recalculate it.
+            if fd is None:
+                try:
+                    fd = fi.get_dependencies(pf = self.pf)
+                    self.pf.field_dependencies[field] = fd
+                except:
+                    continue
             requested = self._determine_fields(list(set(fd.requested)))
             deps = [d for d in requested if d not in fields_to_get]
             fields_to_get += deps

diff -r 8249a2890da54d3677a3c472598f59f282415a64 -r bc9faeefee77acc1e0a042b20da35b3bf26b4d82 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -757,8 +757,10 @@
     def add_fields(self, fields):
         fields = ensure_list(fields)
         temp_storage = ProfileFieldAccumulator(len(fields), self.size)
-        for g in parallel_objects(self.data_source._grids):
-            self._bin_grid(g, fields, temp_storage)
+        cfields = fields + list(self.bin_fields)
+        citer = self.data_source.chunks(cfields, "io")
+        for chunk in parallel_objects(citer):
+            self._bin_chunk(chunk, fields, temp_storage)
         self._finalize_storage(fields, temp_storage)
 
     def _finalize_storage(self, fields, temp_storage):
@@ -772,42 +774,35 @@
             self.field_data[field] = temp_storage.values[...,i]
             self.field_data[field][blank] = 0.0
         
-    def _bin_grid(self, grid, fields, storage):
+    def _bin_chunk(self, chunk, fields, storage):
         raise NotImplementedError
 
-    def _filter(self, bin_fields, cut_points):
-        # cut_points is initially just the points inside our region
+    def _filter(self, bin_fields):
+        # cut_points is set to be everything initially, but
         # we also want to apply a filtering based on min/max
-        filter = np.zeros(bin_fields[0].shape, dtype='bool')
-        filter[cut_points] = True
+        filter = np.ones(bin_fields[0].shape, dtype='bool')
         for (mi, ma), data in zip(self.bounds, bin_fields):
             filter &= (data > mi)
             filter &= (data < ma)
         return filter, [data[filter] for data in bin_fields]
         
-    def _get_data(self, grid, fields):
-        # Save the values in the grid beforehand.
-        old_params = grid.field_parameters
-        old_keys = grid.field_data.keys()
-        grid.field_parameters = self.data_source.field_parameters
-        # Now we ask our source which values to include
-        pointI = self.data_source._get_point_indices(grid)
-        bin_fields = [grid[bf] for bf in self.bin_fields]
+    def _get_data(self, chunk, fields):
+        # We are using chunks now, which will manage the field parameters and
+        # the like.
+        bin_fields = [chunk[bf] for bf in self.bin_fields]
         # We want to make sure that our fields are within the bounds of the
         # binning
-        filter, bin_fields = self._filter(bin_fields, pointI)
+        filter, bin_fields = self._filter(bin_fields)
         if not np.any(filter): return None
         arr = np.zeros((bin_fields[0].size, len(fields)), dtype="float64")
         for i, field in enumerate(fields):
-            arr[:,i] = grid[field][filter]
+            arr[:,i] = chunk[field][filter]
         if self.weight_field is not None:
-            weight_data = grid[self.weight_field]
+            weight_data = chunk[self.weight_field]
         else:
-            weight_data = np.ones(grid.ActiveDimensions, dtype="float64")
+            weight_data = np.ones(chunk.ires.size, dtype="float64")
         weight_data = weight_data[filter]
         # So that we can pass these into 
-        grid.field_parameters = old_params
-        grid.field_data = YTFieldData( [(k, grid.field_data[k]) for k in old_keys] )
         return arr, weight_data, bin_fields
 
     def __getitem__(self, key):
@@ -835,10 +830,10 @@
         self.bounds = ((self.x_bins[0], self.x_bins[-1]),)
         self.x = self.x_bins
 
-    def _bin_grid(self, grid, fields, storage):
-        gd = self._get_data(grid, fields)
-        if gd is None: return
-        fdata, wdata, (bf_x,) = gd
+    def _bin_chunk(self, chunk, fields, storage):
+        rv = self._get_data(chunk, fields)
+        if rv is None: return
+        fdata, wdata, (bf_x,) = rv
         bin_ind = np.digitize(bf_x, self.x_bins) - 1
         new_bin_profile1d(bin_ind, wdata, fdata,
                       storage.weight_values, storage.values,
@@ -867,8 +862,8 @@
         self.x = self.x_bins
         self.y = self.y_bins
 
-    def _bin_grid(self, grid, fields, storage):
-        rv = self._get_data(grid, fields)
+    def _bin_chunk(self, chunk, fields, storage):
+        rv = self._get_data(chunk, fields)
         if rv is None: return
         fdata, wdata, (bf_x, bf_y) = rv
         bin_ind_x = np.digitize(bf_x, self.x_bins) - 1
@@ -912,8 +907,8 @@
         self.y = self.y_bins
         self.z = self.z_bins
 
-    def _bin_grid(self, grid, fields, storage):
-        rv = self._get_data(grid, fields)
+    def _bin_chunk(self, chunk, fields, storage):
+        rv = self._get_data(chunk, fields)
         if rv is None: return
         fdata, wdata, (bf_x, bf_y, bf_z) = rv
         bin_ind_x = np.digitize(bf_x, self.x_bins) - 1
@@ -983,11 +978,15 @@
         cls = Profile3D
     else:
         raise NotImplementedError
+    bin_fields = data_source._determine_fields(bin_fields)
+    fields = data_source._determine_fields(fields)
+    if weight_field is not None:
+        weight_field, = data_source._determine_fields([weight_field])
     if not iterable(n):
         n = [n] * len(bin_fields)
     if not iterable(accumulation):
         accumulation = [accumulation] * len(bin_fields)
-    logs = [data_source.pf.field_info[f].take_log for f in bin_fields]
+    logs = [data_source.pf._get_field_info(f).take_log for f in bin_fields]
     ex = [data_source.quantities["Extrema"](f, non_zero=l)[0] \
           for f, l in zip(bin_fields, logs)]
     args = [data_source]
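
For context on the ProfileND rework being merged: data now arrives in IO-sized chunks, and each chunk is digitized into bins and accumulated into shared storage. A minimal NumPy sketch of that accumulation loop (profile_1d and the plain-dict chunks are illustrative stand-ins, not yt API):

    import numpy as np

    def profile_1d(chunks, bin_key, value_key, bins):
        # Accumulate a binned average over chunked data.
        values = np.zeros(len(bins) - 1)
        counts = np.zeros(len(bins) - 1)
        for chunk in chunks:
            ind = np.digitize(chunk[bin_key], bins) - 1
            ok = (ind >= 0) & (ind < len(bins) - 1)  # drop out-of-range
            np.add.at(values, ind[ok], chunk[value_key][ok])
            np.add.at(counts, ind[ok], 1.0)
        return values / np.maximum(counts, 1.0)

    chunks = [{"x": np.random.random(100), "v": np.ones(100)}
              for _ in range(4)]
    prof = profile_1d(chunks, "x", "v", np.linspace(0.0, 1.0, 9))
    assert prof.shape == (8,)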

diff -r 8249a2890da54d3677a3c472598f59f282415a64 -r bc9faeefee77acc1e0a042b20da35b3bf26b4d82 yt/data_objects/tests/test_cutting_plane.py
--- a/yt/data_objects/tests/test_cutting_plane.py
+++ b/yt/data_objects/tests/test_cutting_plane.py
@@ -32,6 +32,7 @@
         fns += pw.save(name=tmpname)
         frb = cut.to_frb((1.0,'unitary'), 64)
         for cut_field in ['Ones', 'Density']:
+            fi = pf._get_field_info("unknown", cut_field)
             yield assert_equal, frb[cut_field].info['data_source'], \
                 cut.__str__()
             yield assert_equal, frb[cut_field].info['axis'], \
@@ -39,7 +40,7 @@
             yield assert_equal, frb[cut_field].info['field'], \
                 cut_field
             yield assert_equal, frb[cut_field].info['units'], \
-                pf.field_info[cut_field].get_units()
+                fi.get_units()
             yield assert_equal, frb[cut_field].info['xlim'], \
                 frb.bounds[:2]
             yield assert_equal, frb[cut_field].info['ylim'], \

diff -r 8249a2890da54d3677a3c472598f59f282415a64 -r bc9faeefee77acc1e0a042b20da35b3bf26b4d82 yt/data_objects/tests/test_profiles.py
--- a/yt/data_objects/tests/test_profiles.py
+++ b/yt/data_objects/tests/test_profiles.py
@@ -152,3 +152,25 @@
         p3d.add_fields(["Ones"])
         yield assert_equal, p3d["Ones"], np.ones((nb,nb,nb))
 
+def test_particle_profiles():
+    for nproc in [1, 2, 4, 8]:
+        pf = fake_random_pf(32, nprocs=nproc, particles = 32**3)
+        dd = pf.h.all_data()
+
+        p1d = Profile1D(dd, "particle_position_x", 128,
+                        0.0, 1.0, False, weight_field = None)
+        p1d.add_fields(["particle_ones"])
+        yield assert_equal, p1d["particle_ones"].sum(), 32**3
+
+        p2d = Profile2D(dd, "particle_position_x", 128, 0.0, 1.0, False,
+                            "particle_position_y", 128, 0.0, 1.0, False,
+                        weight_field = None)
+        p2d.add_fields(["particle_ones"])
+        yield assert_equal, p2d["particle_ones"].sum(), 32**3
+
+        p3d = Profile3D(dd, "particle_position_x", 128, 0.0, 1.0, False,
+                            "particle_position_y", 128, 0.0, 1.0, False,
+                            "particle_position_z", 128, 0.0, 1.0, False,
+                        weight_field = None)
+        p3d.add_fields(["particle_ones"])
+        yield assert_equal, p3d["particle_ones"].sum(), 32**3
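
The new particle-profile test leans on the positional ProfileND signature: per binning axis it takes the bin field, the bin count, the lower and upper bounds, and a log-spacing flag, followed by the weight field. Annotated, the 1-D call from the test reads:

    p1d = Profile1D(dd,
                    "particle_position_x",  # field to bin on
                    128,                    # number of bins
                    0.0, 1.0,               # lower and upper bin edges
                    False,                  # linear, not log, spacing
                    weight_field=None)      # unweighted accumulation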

diff -r 8249a2890da54d3677a3c472598f59f282415a64 -r bc9faeefee77acc1e0a042b20da35b3bf26b4d82 yt/data_objects/tests/test_projection.py
--- a/yt/data_objects/tests/test_projection.py
+++ b/yt/data_objects/tests/test_projection.py
@@ -46,6 +46,7 @@
                 fns += pw.save(name=tmpname)
                 frb = proj.to_frb((1.0,'unitary'), 64)
                 for proj_field in ['Ones', 'Density']:
+                    fi = pf._get_field_info(proj_field)
                     yield assert_equal, frb[proj_field].info['data_source'], \
                             proj.__str__()
                     yield assert_equal, frb[proj_field].info['axis'], \
@@ -53,7 +54,7 @@
                     yield assert_equal, frb[proj_field].info['field'], \
                             proj_field
                     yield assert_equal, frb[proj_field].info['units'], \
-                            pf.field_info[proj_field].get_units()
+                            fi.get_units()
                     yield assert_equal, frb[proj_field].info['xlim'], \
                             frb.bounds[:2]
                     yield assert_equal, frb[proj_field].info['ylim'], \

diff -r 8249a2890da54d3677a3c472598f59f282415a64 -r bc9faeefee77acc1e0a042b20da35b3bf26b4d82 yt/data_objects/tests/test_slice.py
--- a/yt/data_objects/tests/test_slice.py
+++ b/yt/data_objects/tests/test_slice.py
@@ -67,6 +67,7 @@
                 fns += pw.save(name=tmpname)
                 frb = slc.to_frb((1.0, 'unitary'), 64)
                 for slc_field in ['Ones', 'Density']:
+                    fi = pf._get_field_info(slc_field)
                     yield assert_equal, frb[slc_field].info['data_source'], \
                         slc.__str__()
                     yield assert_equal, frb[slc_field].info['axis'], \
@@ -74,7 +75,7 @@
                     yield assert_equal, frb[slc_field].info['field'], \
                         slc_field
                     yield assert_equal, frb[slc_field].info['units'], \
-                        pf.field_info[slc_field].get_units()
+                        fi.get_units()
                     yield assert_equal, frb[slc_field].info['xlim'], \
                         frb.bounds[:2]
                     yield assert_equal, frb[slc_field].info['ylim'], \

diff -r 8249a2890da54d3677a3c472598f59f282415a64 -r bc9faeefee77acc1e0a042b20da35b3bf26b4d82 yt/fields/tests/test_fields.py
--- a/yt/fields/tests/test_fields.py
+++ b/yt/fields/tests/test_fields.py
@@ -21,17 +21,24 @@
     cp_z_vec = np.array((0.0, 0.0, 1.0)),
 )
 
-_base_fields = ["Density", "x-velocity", "y-velocity", "z-velocity"]
+_base_fields = (("gas", "Density"),
+                ("gas", "x-velocity"),
+                ("gas", "y-velocity"),
+                ("gas", "z-velocity"))
+_base_field_names = [f[1] for f in _base_fields]
 
 def realistic_pf(fields, nprocs):
     np.random.seed(int(0x4d3d3d3))
-    pf = fake_random_pf(16, fields = fields, nprocs = nprocs)
+    fields = list(set([_strip_ftype(f) for f in fields]))
+    pf = fake_random_pf(16, fields = fields, nprocs = nprocs,
+                        particles = 4**3)
     pf.parameters["HydroMethod"] = "streaming"
     pf.parameters["Gamma"] = 5.0/3.0
     pf.parameters["EOSType"] = 1.0
     pf.parameters["EOSSoundSpeed"] = 1.0
     pf.conversion_factors["Time"] = 1.0
     pf.conversion_factors.update( dict((f, 1.0) for f in fields) )
+    pf.gamma = 5.0/3.0
     pf.current_redshift = 0.0001
     pf.hubble_constant = 0.7
     pf.omega_matter = 0.27
@@ -41,6 +48,20 @@
         pf.units[unit+'hcm'] = pf.units[unit]
     return pf
 
+def _strip_ftype(field):
+    if not isinstance(field, tuple):
+        return field
+    elif field[0] == "all":
+        return field
+    return field[1]
+
+def _expand_field(field):
+    if isinstance(field, tuple):
+        return field
+    if "particle" in field:
+        return ("all", field)
+    return ("gas", field)
+
 class TestFieldAccess(object):
     description = None
 
@@ -52,8 +73,13 @@
 
     def __call__(self):
         field = FieldInfo[self.field_name]
+        # Don't test the base fields
+        if field in _base_fields or field in _base_field_names: return
         deps = field.get_dependencies()
-        fields = list(set(deps.requested + _base_fields))
+        fields = set([])
+        for f in deps.requested + list(_base_fields):
+            fields.add(_expand_field(f))
+        fields = list(fields)
         skip_grids = False
         needs_spatial = False
         for v in field.validators:
@@ -85,7 +111,7 @@
                 assert_array_almost_equal_nulp(v1, conv*field._function(field, g), 4)
 
 def test_all_fields():
-    for field in FieldInfo:
+    for field in sorted(FieldInfo):
         if isinstance(field, types.TupleType):
             fname = field[0]
         else:
@@ -93,13 +119,16 @@
         if fname.startswith("CuttingPlane"): continue
         if fname.startswith("particle"): continue
         if fname.startswith("CIC"): continue
-        if field.startswith("BetaPar"): continue
-        if field.startswith("TBetaPar"): continue
-        if field.startswith("BetaPerp"): continue
+        if fname.startswith("BetaPar"): continue
+        if fname.startswith("TBetaPar"): continue
+        if fname.startswith("BetaPerp"): continue
         if fname.startswith("WeakLensingConvergence"): continue
         if fname.startswith("DensityPerturbation"): continue
         if fname.startswith("Matter_Density"): continue
         if fname.startswith("Overdensity"): continue
+        # TotalMass is disabled because of issues with mixed particle/fluid
+        # field detection in current field system.
+        if fname.startswith("TotalMass"): continue
         if FieldInfo[field].particle_type: continue
         for nproc in [1, 4, 8]:
             test_all_fields.__name__ = "%s_%s" % (field, nproc)

diff -r 8249a2890da54d3677a3c472598f59f282415a64 -r bc9faeefee77acc1e0a042b20da35b3bf26b4d82 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -25,6 +25,8 @@
     YTFieldData, \
     YTDataContainer, \
     YTSelectionContainer
+from yt.data_objects.particle_unions import \
+    ParticleUnion
 from yt.data_objects.grid_patch import \
     AMRGridPatch
 from yt.geometry.geometry_handler import \
@@ -66,7 +68,8 @@
 from .fields import \
     StreamFieldInfo, \
     add_stream_field, \
-    KnownStreamFields
+    KnownStreamFields, \
+    _setup_particle_fields
 
 class StreamGrid(AMRGridPatch):
     """
@@ -224,7 +227,12 @@
         GridGeometryHandler._setup_classes(self, dd)
 
     def _detect_fields(self):
-        self.field_list = list(set(self.stream_handler.get_fields()))
+        # NOTE: Because particle unions add to the actual field list, without
+        # having the keys in the field list itself, we need to double check
+        # here.
+        fl = set(self.stream_handler.get_fields())
+        fl.update(set(getattr(self, "field_list", [])))
+        self.field_list = list(fl)
 
     def _populate_grid_objects(self):
         for g in self.grids:
@@ -245,16 +253,14 @@
         already in the stream but not part of the data dict will be left
         alone. 
         """
-        
+        [update_field_names(d) for d in data]
         particle_types = set_particle_types(data[0])
-        ftype = "all"
+        ftype = "io"
 
         for key in data[0].keys() :
             if key is "number_of_particles": continue
             self.stream_handler.particle_types[key] = particle_types[key]
-            if key not in self.field_list:
-                self.field_list.append(key)
-                
+
         for i, grid in enumerate(self.grids) :
             if data[i].has_key("number_of_particles") :
                 grid.NumberOfParticles = data[i].pop("number_of_particles")
@@ -262,10 +268,16 @@
                 if fname in grid.field_data:
                     grid.field_data.pop(fname, None)
                 elif (ftype, fname) in grid.field_data:
-                    grid.field_data.pop( ("all", fname) )
+                    grid.field_data.pop( ("io", fname) )
                 self.stream_handler.fields[grid.id][fname] = data[i][fname]
             
+
+        # We only want to create a superset of fields here.
         self._detect_fields()
+        mylog.debug("Creating Particle Union 'all'")
+        pu = ParticleUnion("all", list(self.pf.particle_types_raw))
+        self.pf.add_particle_union(pu)
+        self.pf.particle_types = tuple(set(self.pf.particle_types))
         self._setup_unknown_fields()
                 
 class StreamStaticOutput(StaticOutput):
@@ -323,10 +335,34 @@
     def _skip_cache(self):
         return True
 
+    def _setup_particle_type(self, ptype):
+        orig = set(self.field_info.items())
+        _setup_particle_fields(self.field_info, ptype)
+        return [n for n, v in set(self.field_info.items()).difference(orig)]
+
 class StreamDictFieldHandler(dict):
+    _additional_fields = ()
 
     @property
-    def all_fields(self): return self[0].keys()
+    def all_fields(self): 
+        fields = list(self._additional_fields) + self[0].keys()
+        fields = list(set(fields))
+        return fields
+
+def update_field_names(data):
+    orig_names = data.keys()
+    for k in orig_names:
+        if isinstance(k, tuple): continue
+        s = getattr(data[k], "shape", ())
+        if len(s) == 1:
+            field = ("io", k)
+        elif len(s) == 3:
+            field = ("gas", k)
+        elif len(s) == 0:
+            continue
+        else:
+            raise NotImplementedError
+        data[field] = data.pop(k)
 
 def set_particle_types(data) :
 
@@ -353,7 +389,7 @@
     if pf.h.num_grids > 1 :
 
         try:
-            x, y, z = (pdata["all","particle_position_%s" % ax] for ax in 'xyz')
+            x, y, z = (pdata["io","particle_position_%s" % ax] for ax in 'xyz')
         except KeyError:
             raise KeyError("Cannot decompose particle data without position fields!")
         
@@ -450,9 +486,16 @@
         pdata["number_of_particles"] = number_of_particles
         for key in data.keys() :
             if len(data[key].shape) == 1 :
-                pdata[key] = data.pop(key)
+                if not isinstance(key, tuple):
+                    field = ("io", key)
+                    mylog.debug("Reassigning '%s' to '%s'", key, field)
+                else:
+                    field = key
+                sfh._additional_fields += (field,)
+                pdata[field] = data.pop(key)
     else :
         particle_types = {}
+    update_field_names(data)
     
     if nprocs > 1:
         temp = {}
@@ -508,12 +551,11 @@
     # Now figure out where the particles go
 
     if number_of_particles > 0 :
-        if ("all", "particle_position_x") not in pdata:
+        if ("io", "particle_position_x") not in pdata:
             pdata_ftype = {}
             for f in [k for k in sorted(pdata)]:
                 if not hasattr(pdata[f], "shape"): continue
-                mylog.debug("Reassigning '%s' to ('all','%s')", f, f)
-                pdata_ftype["all",f] = pdata.pop(f)
+                pdata_ftype["io",f] = pdata.pop(f)
             pdata_ftype.update(pdata)
             pdata = pdata_ftype
         assign_particle_data(spf, pdata)
@@ -593,6 +635,7 @@
         grid_levels[i,:] = g.pop("level")
         if g.has_key("number_of_particles") :
             number_of_particles[i,:] = g.pop("number_of_particles")  
+        update_field_names(g)
         sfh[i] = g
             
     handler = StreamHandler(
@@ -665,7 +708,10 @@
     if number_of_particles > 0 :
         pdata = {}
         for field in base_pf.h.field_list :
-            if base_pf.field_info[field].particle_type :
+            if not isinstance(field, tuple):
+                field = ("unknown", field)
+            fi = base_pf._get_field_info(*field)
+            if fi.particle_type :
                 pdata[field] = np.concatenate([grid[field]
                                                for grid in base_pf.h.grids])
         pdata["number_of_particles"] = number_of_particles
@@ -688,7 +734,10 @@
                        level = g.Level,
                        dimensions = g.ActiveDimensions )
             for field in pf.h.field_list:
-                if not pf.field_info[field].particle_type :
+                if not isinstance(field, tuple):
+                    field = ("unknown", field)
+                fi = pf._get_field_info(*field)
+                if not fi.particle_type :
                     gd[field] = g[field]
             grid_data.append(gd)
             if g.Level < pf.h.max_level: continue
@@ -701,7 +750,10 @@
                 gd = dict(left_edge = LE, right_edge = grid.right_edge,
                           level = g.Level + 1, dimensions = dims)
                 for field in pf.h.field_list:
-                    if not pf.field_info[field].particle_type :
+                    if not isinstance(field, tuple):
+                        field = ("unknown", field)
+                    fi = pf._get_field_info(*field)
+                    if not fi.particle_type :
                         gd[field] = grid[field]
                 grid_data.append(gd)
         
@@ -712,12 +764,12 @@
     # Now reassign particle data to grids
 
     if number_of_particles > 0:
-        if ("all", "particle_position_x") not in pdata:
+        if ("io", "particle_position_x") not in pdata:
             pdata_ftype = {}
             for f in [k for k in sorted(pdata)]:
                 if not hasattr(pdata[f], "shape"): continue
-                mylog.debug("Reassigning '%s' to ('all','%s')", f, f)
-                pdata_ftype["all",f] = pdata.pop(f)
+                mylog.debug("Reassigning '%s' to ('io','%s')", f, f)
+                pdata_ftype["io",f] = pdata.pop(f)
             pdata_ftype.update(pdata)
             pdata = pdata_ftype
         assign_particle_data(pf, pdata)
@@ -751,17 +803,6 @@
     n_ref = 64
     over_refine_factor = 1
 
-    def _setup_particle_type(self, ptype):
-        orig = set(self.field_info.items())
-        particle_vector_functions(ptype,
-            ["particle_position_%s" % ax for ax in 'xyz'],
-            ["particle_velocity_%s" % ax for ax in 'xyz'],
-            self.field_info)
-        particle_deposition_functions(ptype,
-            "Coordinates", "particle_mass", self.field_info)
-        standard_particle_fields(self.field_info, ptype)
-        return [n for n, v in set(self.field_info.items()).difference(orig)]
-
 def load_particles(data, sim_unit_to_cm, bbox=None,
                       sim_time=0.0, periodicity=(True, True, True),
                       n_ref = 64, over_refine_factor = 1):
@@ -819,8 +860,19 @@
 
     sfh = StreamDictFieldHandler()
     
+    pdata = {}
+    for key in data.keys() :
+        if not isinstance(key, tuple):
+            field = ("io", key)
+            mylog.debug("Reassigning '%s' to '%s'", key, field)
+        else:
+            field = key
+        pdata[field] = data[key]
+        sfh._additional_fields += (field,)
+    data = pdata # Drop reference count
+    update_field_names(data)
     particle_types = set_particle_types(data)
-    
+
     sfh.update({'stream_file':data})
     grid_left_edges = domain_left_edge
     grid_right_edges = domain_right_edge
@@ -874,7 +926,7 @@
         self.oct_handler = oct_handler
         self._last_mask = None
         self._last_selector_id = None
-        self._current_particle_type = 'all'
+        self._current_particle_type = 'io'
         self._current_fluid_type = self.pf.default_fluid_type
         self.base_region = base_region
         self.base_selector = base_region.selector
@@ -953,7 +1005,13 @@
         super(StreamOctreeHandler, self)._setup_classes(dd)
 
     def _detect_fields(self):
-        self.field_list = list(set(self.stream_handler.get_fields()))
+        # NOTE: Because particle unions add to the actual field list, without
+        # having the keys in the field list itself, we need to double check
+        # here.
+        fl = set(self.stream_handler.get_fields())
+        fl.update(set(getattr(self, "field_list", [])))
+        self.field_list = list(fl)
+
 
 class StreamOctreeStaticOutput(StreamStaticOutput):
     _hierarchy_class = StreamOctreeHandler
@@ -1005,6 +1063,7 @@
     domain_left_edge = np.array(bbox[:, 0], 'float64')
     domain_right_edge = np.array(bbox[:, 1], 'float64')
     grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
+    update_field_names(data)
 
     sfh = StreamDictFieldHandler()
     
@@ -1093,7 +1152,13 @@
             self.io = io_registry[self.data_style](self.pf)
 
     def _detect_fields(self):
-        self.field_list = list(set(self.stream_handler.get_fields()))
+        # NOTE: Because particle unions add to the actual field list, without
+        # having the keys in the field list itself, we need to double check
+        # here.
+        fl = set(self.stream_handler.get_fields())
+        fl.update(set(getattr(self, "field_list", [])))
+        self.field_list = list(fl)
+
 
 class StreamHexahedralStaticOutput(StreamStaticOutput):
     _hierarchy_class = StreamHexahedralHierarchy
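
The update_field_names helper introduced in this diff re-keys bare field names by array rank: 1-D arrays are treated as ("io", name) particle fields and 3-D arrays as ("gas", name) fluid fields. A standalone run of the same logic:

    import numpy as np

    def update_field_names(data):
        # Re-key bare names by rank: 1-D -> particle, 3-D -> fluid.
        for k in list(data.keys()):
            if isinstance(k, tuple):
                continue
            s = getattr(data[k], "shape", ())
            if len(s) == 1:
                field = ("io", k)
            elif len(s) == 3:
                field = ("gas", k)
            elif len(s) == 0:
                continue
            else:
                raise NotImplementedError
            data[field] = data.pop(k)

    d = {"Density": np.ones((4, 4, 4)), "particle_mass": np.ones(16)}
    update_field_names(d)
    assert ("gas", "Density") in d and ("io", "particle_mass") in d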

diff -r 8249a2890da54d3677a3c472598f59f282415a64 -r bc9faeefee77acc1e0a042b20da35b3bf26b4d82 yt/frontends/stream/fields.py
--- a/yt/frontends/stream/fields.py
+++ b/yt/frontends/stream/fields.py
@@ -34,37 +34,20 @@
 StreamFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
 add_field = StreamFieldInfo.add_field
 
-add_stream_field("density", function = NullFunc)
+add_stream_field("Density", function = NullFunc)
 add_stream_field("x-velocity", function = NullFunc)
 add_stream_field("y-velocity", function = NullFunc)
 add_stream_field("z-velocity", function = NullFunc)
 
-add_field("Density", function = TranslationFunc("density"))
-
-add_stream_field("particle_position_x", function = NullFunc, particle_type=True)
-add_stream_field("particle_position_y", function = NullFunc, particle_type=True)
-add_stream_field("particle_position_z", function = NullFunc, particle_type=True)
-add_stream_field("particle_index", function = NullFunc, particle_type=True)
-add_stream_field("particle_gas_density", function = NullFunc, particle_type=True)
-add_stream_field("particle_gas_temperature", function = NullFunc, particle_type=True)
-add_stream_field("particle_mass", function = NullFunc, particle_type=True)
-
-add_field("ParticleMass", function = TranslationFunc("particle_mass"),
-          particle_type=True)
-
-add_stream_field(("all", "particle_position_x"), function = NullFunc, particle_type=True)
-add_stream_field(("all", "particle_position_y"), function = NullFunc, particle_type=True)
-add_stream_field(("all", "particle_position_z"), function = NullFunc, particle_type=True)
-add_stream_field(("all", "particle_index"), function = NullFunc, particle_type=True)
-add_stream_field(("all", "particle_gas_density"), function = NullFunc, particle_type=True)
-add_stream_field(("all", "particle_gas_temperature"), function = NullFunc, particle_type=True)
-add_stream_field(("all", "particle_mass"), function = NullFunc, particle_type=True)
-
-add_field(("all", "ParticleMass"), function = TranslationFunc("particle_mass"),
-          particle_type=True)
-
-particle_vector_functions("all", ["particle_position_%s" % ax for ax in 'xyz'],
-                                 ["particle_velocity_%s" % ax for ax in 'xyz'],
-                          StreamFieldInfo)
-particle_deposition_functions("all", "Coordinates", "ParticleMass",
-                               StreamFieldInfo)
+def _setup_particle_fields(registry, ptype):
+    for fn in ["creation_time", "dynamical_time", "metallicity_fraction"] + \
+              ["particle_type", "particle_index", "particle_mass"] + \
+              ["particle_position_%s" % ax for ax in 'xyz'] + \
+              ["particle_velocity_%s" % ax for ax in 'xyz']:
+        registry.add_field((ptype, fn), function=NullFunc, particle_type=True)
+    particle_vector_functions(ptype,
+        ["particle_position_%s" % ax for ax in 'xyz'],
+        ["particle_velocity_%s" % ax for ax in 'xyz'],
+        registry)
+    particle_deposition_functions(ptype,
+        "Coordinates", "particle_mass", registry)

diff -r 8249a2890da54d3677a3c472598f59f282415a64 -r bc9faeefee77acc1e0a042b20da35b3bf26b4d82 yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -46,7 +46,7 @@
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
         chunks = list(chunks)
-        if any((ftype not in ("gas", "deposit") for ftype, fname in fields)):
+        if any((ftype not in ("gas",) for ftype, fname in fields)):
             raise NotImplementedError
         rv = {}
         for field in fields:
@@ -57,63 +57,38 @@
                     size, [f2 for f1, f2 in fields], ng)
         for field in fields:
             ftype, fname = field
-            if ftype == 'deposit':
-                fname = field
             ind = 0
             for chunk in chunks:
                 for g in chunk.objs:
-                    ds = self.fields[g.id][fname]
+                    ds = self.fields[g.id][field]
                     ind += g.select(selector, ds, rv[field], ind) # caches
         return rv
 
-    def _read_particle_selection(self, chunks, selector, fields):
+    def _read_particle_coords(self, chunks, ptf):
         chunks = list(chunks)
-        if any((ftype != "all" for ftype, fname in fields)):
-            raise NotImplementedError
-        rv = {}
-        # Now we have to do something unpleasant
-        mylog.debug("First pass: counting particles.")
-        size = 0
-        pfields = [("all", "particle_position_%s" % ax) for ax in 'xyz']
         for chunk in chunks:
             for g in chunk.objs:
                 if g.NumberOfParticles == 0: continue
                 gf = self.fields[g.id]
-                # Sometimes the stream operator won't have the 
-                # ("all", "Something") fields, but instead just "Something".
-                pns = []
-                for pn in pfields:
-                    if pn in gf: pns.append(pn)
-                    else: pns.append(pn[1])
-                size += g.count_particles(selector, 
-                    gf[pns[0]], gf[pns[1]], gf[pns[2]])
-        for field in fields:
-            # TODO: figure out dataset types
-            rv[field] = np.empty(size, dtype='float64')
-        ng = sum(len(c.objs) for c in chunks)
-        mylog.debug("Reading %s points of %s fields in %s grids",
-                   size, [f2 for f1, f2 in fields], ng)
-        ind = 0
+                for ptype, field_list in sorted(ptf.items()):
+                    x, y, z  = (gf[ptype, "particle_position_%s" % ax]
+                                for ax in 'xyz')
+                    yield ptype, (x, y, z)
+
+    def _read_particle_fields(self, chunks, ptf, selector):
+        chunks = list(chunks)
         for chunk in chunks:
             for g in chunk.objs:
                 if g.NumberOfParticles == 0: continue
                 gf = self.fields[g.id]
-                pns = []
-                for pn in pfields:
-                    if pn in gf: pns.append(pn)
-                    else: pns.append(pn[1])
-                mask = g.select_particles(selector,
-                    gf[pns[0]], gf[pns[1]], gf[pns[2]])
-                if mask is None: continue
-                for field in set(fields):
-                    if field in gf:
-                        fn = field
-                    else:
-                        fn = field[1]
-                    gdata = gf[fn][mask]
-                    rv[field][ind:ind+gdata.size] = gdata
-                ind += gdata.size
-        return rv
+                for ptype, field_list in sorted(ptf.items()):
+                    x, y, z  = (gf[ptype, "particle_position_%s" % ax]
+                                for ax in 'xyz')
+                    mask = selector.select_points(x, y, z)
+                    if mask is None: continue
+                    for field in field_list:
+                        data = np.asarray(gf[ptype, field])
+                        yield (ptype, field), data[mask]
 
     @property
     def _read_exception(self):
@@ -127,19 +102,8 @@
         self.fields = pf.stream_handler.fields
         super(StreamParticleIOHandler, self).__init__(pf)
 
-    def _read_particle_selection(self, chunks, selector, fields):
-        rv = {}
-        # We first need a set of masks for each particle type
-        ptf = defaultdict(list)
-        psize = defaultdict(lambda: 0)
+    def _read_particle_coords(self, chunks, ptf):
         chunks = list(chunks)
-        for ftype, fname in fields:
-            ptf[ftype].append(fname)
-        # For this type of file, we actually have something slightly different.
-        # We are given a list of ParticleDataChunks, which is composed of
-        # individual ParticleOctreeSubsets.  The data_files attribute on these
-        # may in fact overlap.  So we will iterate over a union of all the
-        # data_files.
         data_files = set([])
         for chunk in chunks:
             for obj in chunk.objs:
@@ -148,39 +112,32 @@
             f = self.fields[data_file.filename]
             # This double-reads
             for ptype, field_list in sorted(ptf.items()):
-                assert(ptype == "all")
-                psize[ptype] += selector.count_points(
-                        f["particle_position_x"],
-                        f["particle_position_y"],
-                        f["particle_position_z"])
-        # Now we have all the sizes, and we can allocate
-        ind = {}
-        for field in fields:
-            mylog.debug("Allocating %s values for %s", psize[field[0]], field)
-            rv[field] = np.empty(psize[field[0]], dtype="float64")
-            ind[field] = 0
+                yield ptype, (f[ptype, "particle_position_x"],
+                              f[ptype, "particle_position_y"],
+                              f[ptype, "particle_position_z"])
+            
+    def _read_particle_fields(self, chunks, ptf, selector):
+        data_files = set([])
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
         for data_file in data_files:
             f = self.fields[data_file.filename]
             for ptype, field_list in sorted(ptf.items()):
-                assert(ptype == "all")
-                mask = selector.select_points(
-                        f["particle_position_x"],
-                        f["particle_position_y"],
-                        f["particle_position_z"])
+                x, y, z = (f[ptype, "particle_position_%s" % ax]
+                           for ax in 'xyz')
+                mask = selector.select_points(x, y, z)
                 if mask is None: continue
                 for field in field_list:
-                    data = f[field][mask,...]
-                    my_ind = ind[ptype, field]
-                    mylog.debug("Filling from %s to %s with %s",
-                        my_ind, my_ind+data.shape[0], field)
-                    rv[ptype, field][my_ind:my_ind + data.shape[0],...] = data
-                    ind[ptype, field] += data.shape[0]
-        return rv
+                    data = f[ptype, field][mask]
+                    yield (ptype, field), data
+
 
     def _initialize_index(self, data_file, regions):
         # self.fields[g.id][fname] is the pattern here
         pos = np.column_stack(self.fields[data_file.filename][
-                              "particle_position_%s" % ax] for ax in 'xyz')
+                              ("io", "particle_position_%s" % ax)]
+                              for ax in 'xyz')
         if np.any(pos.min(axis=0) < data_file.pf.domain_left_edge) or \
            np.any(pos.max(axis=0) > data_file.pf.domain_right_edge):
             raise YTDomainOverflow(pos.min(axis=0), pos.max(axis=0),
@@ -194,11 +151,11 @@
         return morton
 
     def _count_particles(self, data_file):
-        npart = self.fields[data_file.filename]["particle_position_x"].size
-        return {'all': npart}
+        npart = self.fields[data_file.filename]["io", "particle_position_x"].size
+        return {'io': npart}
 
     def _identify_fields(self, data_file):
-        return [ ("all", k) for k in self.fields[data_file.filename].keys()]
+        return self.fields[data_file.filename].keys()
 
 class IOHandlerStreamHexahedral(BaseIOHandler):
     _data_style = "stream_hexahedral"
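
The IO refactor above splits particle reads into two generators: _read_particle_coords yields (ptype, (x, y, z)) per object, and _read_particle_fields yields ((ptype, field), data[mask]) pairs, leaving allocation to the caller. A toy version of the second protocol, with a dict-backed file and an accept-all selector (both stand-ins):

    import numpy as np

    def read_particle_fields(files, ptf, select):
        # ptf maps particle type -> list of requested field names.
        for f in files:
            for ptype, field_list in sorted(ptf.items()):
                x, y, z = (f[ptype, "particle_position_%s" % ax]
                           for ax in "xyz")
                mask = select(x, y, z)
                if mask is None:
                    continue
                for field in field_list:
                    yield (ptype, field), f[ptype, field][mask]

    f = {("io", "particle_position_x"): np.random.random(8),
         ("io", "particle_position_y"): np.random.random(8),
         ("io", "particle_position_z"): np.random.random(8),
         ("io", "particle_mass"): np.ones(8)}
    out = dict(read_particle_fields([f], {"io": ["particle_mass"]},
                                    lambda x, y, z: np.ones(8, bool)))
    assert out["io", "particle_mass"].shape == (8,)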

diff -r 8249a2890da54d3677a3c472598f59f282415a64 -r bc9faeefee77acc1e0a042b20da35b3bf26b4d82 yt/frontends/stream/tests/test_stream_particles.py
--- a/yt/frontends/stream/tests/test_stream_particles.py
+++ b/yt/frontends/stream/tests/test_stream_particles.py
@@ -28,7 +28,6 @@
     
     # Check that all of this runs ok without particles
     
-    ug0 = load_uniform_grid({"Density": dens}, domain_dims, 1.0)
     ug0 = load_uniform_grid({"Density": dens}, domain_dims, 1.0, nprocs=8)
     amr0 = refine_amr(ug0, rc, fo, 3)
 
@@ -74,16 +73,18 @@
 
     # Check to make sure the fields have been defined correctly
     
-    assert ug1._get_field_info("all", "particle_position_x").particle_type
-    assert ug1._get_field_info("all", "particle_position_y").particle_type
-    assert ug1._get_field_info("all", "particle_position_z").particle_type
-    assert ug1._get_field_info("all", "particle_mass").particle_type
+    for ptype in ("all", "io"):
+        assert ug1._get_field_info(ptype, "particle_position_x").particle_type
+        assert ug1._get_field_info(ptype, "particle_position_y").particle_type
+        assert ug1._get_field_info(ptype, "particle_position_z").particle_type
+        assert ug1._get_field_info(ptype, "particle_mass").particle_type
     assert not ug1._get_field_info("gas", "Density").particle_type
 
-    assert ug2._get_field_info("all", "particle_position_x").particle_type
-    assert ug2._get_field_info("all", "particle_position_y").particle_type
-    assert ug2._get_field_info("all", "particle_position_z").particle_type
-    assert ug2._get_field_info("all", "particle_mass").particle_type
+    for ptype in ("all", "io"):
+        assert ug2._get_field_info(ptype, "particle_position_x").particle_type
+        assert ug2._get_field_info(ptype, "particle_position_y").particle_type
+        assert ug2._get_field_info(ptype, "particle_position_z").particle_type
+        assert ug2._get_field_info(ptype, "particle_mass").particle_type
     assert not ug2._get_field_info("gas", "Density").particle_type
     
     # Now refine this

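Stream particle fields are now registered under both the concrete "io" type and the "all" union, which is why the assertions loop over both ptypes. A minimal setup in the same spirit as the test (sizes illustrative):

    import numpy as np
    from yt.frontends.stream.api import load_uniform_grid

    npart = 100
    data = {"Density": np.random.random((16, 16, 16)),
            "particle_position_x": np.random.uniform(size=npart),
            "particle_position_y": np.random.uniform(size=npart),
            "particle_position_z": np.random.uniform(size=npart),
            "number_of_particles": npart}
    ug = load_uniform_grid(data, (16, 16, 16), 1.0)
    # particle fields should resolve under both ("io", ...) and ("all", ...)
    assert ug._get_field_info("io", "particle_position_x").particle_type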
diff -r 8249a2890da54d3677a3c472598f59f282415a64 -r bc9faeefee77acc1e0a042b20da35b3bf26b4d82 yt/geometry/tests/test_particle_octree.py
--- a/yt/geometry/tests/test_particle_octree.py
+++ b/yt/geometry/tests/test_particle_octree.py
@@ -88,9 +88,11 @@
     for n_ref in [16, 32, 64, 512, 1024]:
         pf = load_particles(data, 1.0, bbox = bbox, n_ref = n_ref)
         dd = pf.h.all_data()
-        bi = dd["all","mesh_id"]
+        bi = dd["io","mesh_id"]
         v = np.bincount(bi.astype("int64"))
         yield assert_equal, v.max() <= n_ref, True
+        bi2 = dd["all","mesh_id"]
+        yield assert_equal, bi, bi2
 
 def test_particle_overrefine():
     np.random.seed(int(0x4d3d3d3))

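The invariant being tested: no octree leaf may hold more than n_ref particles, so a bincount over each particle's mesh_id must peak at or below n_ref, and the "io" and "all" views of mesh_id must agree. The counting step in isolation:

    import numpy as np

    # mesh_id maps each particle to the octree leaf containing it
    mesh_id = np.array([0, 0, 1, 2, 2, 2], dtype="int64")
    v = np.bincount(mesh_id)   # particles per leaf -> [2, 1, 3]
    print(v.max())             # the quantity compared against n_ref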
diff -r 8249a2890da54d3677a3c472598f59f282415a64 -r bc9faeefee77acc1e0a042b20da35b3bf26b4d82 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -54,7 +54,8 @@
     ValidateParameter, ValidateDataField, ValidateProperty, \
     ValidateSpatial, ValidateGridType, \
     TimeSeriesData, AnalysisTask, analysis_task, \
-    ImageArray, particle_filter, create_profile
+    ImageArray, particle_filter, create_profile, \
+    Profile1D, Profile2D, Profile3D
 
 from yt.data_objects.derived_quantities import \
     add_quantity, quantity_info

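Profile1D, Profile2D, and Profile3D now ship through yt.mods alongside create_profile, so the new profile objects can be constructed directly. A rough sketch, assuming the constructor takes (data_source, x_field, n_bins, x_min, x_max, x_log) followed by an optional weight field:

    from yt.mods import Profile1D
    from yt.testing import fake_random_pf

    pf = fake_random_pf(16, fields=("Density", "Temperature"))
    dd = pf.h.all_data()
    # assumed argument order: source, bin field, bin count, bounds, log flag
    prof = Profile1D(dd, "Density", 32, 1e-3, 1.0, True)
    prof.add_fields(["Temperature"])  # binned values land in prof.field_data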
diff -r 8249a2890da54d3677a3c472598f59f282415a64 -r bc9faeefee77acc1e0a042b20da35b3bf26b4d82 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -138,7 +138,7 @@
     return left, right, level
 
 def fake_random_pf(ndims, peak_value = 1.0, fields = ("Density",),
-                   negative = False, nprocs = 1):
+                   negative = False, nprocs = 1, particles = 0):
     from yt.data_objects.api import data_object_registry
     from yt.frontends.stream.api import load_uniform_grid
     if not iterable(ndims):
@@ -156,6 +156,13 @@
             offsets.append(0.0)
     data = dict((field, (np.random.random(ndims) - offset) * peak_value)
                  for field,offset in zip(fields,offsets))
+    if particles:
+        for f in ('particle_position_%s' % ax for ax in 'xyz'):
+            data[f] = np.random.uniform(size = particles)
+        for f in ('particle_velocity_%s' % ax for ax in 'xyz'):
+            data[f] = np.random.random(size = particles) - 0.5
+        data['particle_mass'] = np.random.random(particles)
+        data['number_of_particles'] = particles
     ug = load_uniform_grid(data, ndims, 1.0, nprocs = nprocs)
     return ug
 

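The new particles keyword seeds the stream dataset with uniformly distributed positions, zero-centered velocities, and random masses, and sets the number_of_particles bookkeeping key, e.g.:

    from yt.testing import fake_random_pf

    ug = fake_random_pf(16, nprocs=4, particles=1000)
    dd = ug.h.all_data()
    # positions land under the "io" particle type (aliased to "all")
    print(dd["io", "particle_position_x"].size)   # 1000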
diff -r 8249a2890da54d3677a3c472598f59f282415a64 -r bc9faeefee77acc1e0a042b20da35b3bf26b4d82 yt/utilities/grid_data_format/writer.py
--- a/yt/utilities/grid_data_format/writer.py
+++ b/yt/utilities/grid_data_format/writer.py
@@ -59,7 +59,9 @@
         A dictionary of field parameters to set.
     """
 
-    field_obj = pf.field_info[field_name]
+    if isinstance(field_name, tuple):
+        field_name = field_name[1]
+    field_obj = pf._get_field_info(field_name)
     if field_obj.particle_type:
         print( "Saving particle fields currently not supported." )
         return
@@ -84,6 +86,9 @@
     # add field info to field_types group
     g = fhandle["field_types"]
     # create the subgroup with the field's name
+    if isinstance(field_name, tuple):
+        field_name = field_name[1]
+    fi = pf._get_field_info(field_name)
     try:
         sg = g.create_group(field_name)
     except ValueError:
@@ -91,8 +96,8 @@
         sys.exit(1)
         
     # grab the display name and units from the field info container.
-    display_name = pf.field_info[field_name].display_name
-    units = pf.field_info[field_name].get_units()
+    display_name = fi.display_name
+    units = fi.get_units()
 
     # check that they actually contain something...
     if display_name:
@@ -122,9 +127,8 @@
         pt_group = particles_group[particle_type_name]
         # add the field data to the grid group
         # Check if this is a real field or particle data.
-        field_obj = pf.field_info[field_name]
         grid.get_data(field_name)
-        if field_obj.particle_type:  # particle data
+        if fi.particle_type:  # particle data
             pt_group[field_name] = grid[field_name]
         else:  # a field
             grid_group[field_name] = grid[field_name]

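Both hunks above repeat the same normalization: a ("ftype", "fname") tuple is reduced to its fname before it is used as an HDF5 group name or handed to pf._get_field_info. The idiom factored out (helper name illustrative):

    def normalize_field_name(field_name):
        # HDF5 group names and display lookups want only the fname half
        # of a ("ftype", "fname") tuple; plain strings pass through
        if isinstance(field_name, tuple):
            return field_name[1]
        return field_name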
diff -r 8249a2890da54d3677a3c472598f59f282415a64 -r bc9faeefee77acc1e0a042b20da35b3bf26b4d82 yt/utilities/particle_generator.py
--- a/yt/utilities/particle_generator.py
+++ b/yt/utilities/particle_generator.py
@@ -5,9 +5,9 @@
 
 class ParticleGenerator(object) :
 
-    default_fields = ["particle_position_x",
-                      "particle_position_y",
-                      "particle_position_z"]
+    default_fields = [("io", "particle_position_x"),
+                      ("io", "particle_position_y"),
+                      ("io", "particle_position_z")]
     
     def __init__(self, pf, num_particles, field_list) :
         """
@@ -20,7 +20,7 @@
         self.pf = pf
         self.num_particles = num_particles
         self.field_list = field_list
-        self.field_list.append("particle_index")
+        self.field_list.append(("io", "particle_index"))
         
         try :
             self.posx_index = self.field_list.index(self.default_fields[0])
@@ -28,9 +28,8 @@
             self.posz_index = self.field_list.index(self.default_fields[2])
         except :
             raise KeyError("Field list must contain the following fields: " +
-                           "\'particle_position_x\', \'particle_position_y\'" +
-                           ", \'particle_position_z\' ")
-        self.index_index = self.field_list.index("particle_index")
+                           "\n".join(self.default_fields))
+        self.index_index = self.field_list.index(("io", "particle_index"))
         
         self.num_grids = self.pf.h.num_grids
         self.NumberOfParticles = np.zeros((self.num_grids), dtype='int64')
@@ -212,9 +211,9 @@
         """
 
         field_list = data.keys()
-        x = data.pop("particle_position_x")
-        y = data.pop("particle_position_y")
-        z = data.pop("particle_position_z")
+        x = data.pop(("io", "particle_position_x"))
+        y = data.pop(("io", "particle_position_y"))
+        z = data.pop(("io", "particle_position_z"))
 
         xcond = np.logical_or(x < pf.domain_left_edge[0],
                               x >= pf.domain_right_edge[0])

diff -r 8249a2890da54d3677a3c472598f59f282415a64 -r bc9faeefee77acc1e0a042b20da35b3bf26b4d82 yt/utilities/tests/test_particle_generator.py
--- a/yt/utilities/tests/test_particle_generator.py
+++ b/yt/utilities/tests/test_particle_generator.py
@@ -23,11 +23,13 @@
 
     # Now generate particles from density
 
-    field_list = ["particle_position_x","particle_position_y",
-                  "particle_position_z","particle_index",
-                  "particle_gas_density"]
+    field_list = [("io", "particle_position_x"),
+                  ("io", "particle_position_y"),
+                  ("io", "particle_position_z"),
+                  ("io", "particle_index"),
+                  ("io", "particle_gas_density")]
     num_particles = 1000000
-    field_dict = {"Density": "particle_gas_density"}
+    field_dict = {("gas", "Density"): ("io", "particle_gas_density")}
     sphere = pf.h.sphere(pf.domain_center, 0.45)
 
     particles1 = WithDensityParticleGenerator(pf, sphere, num_particles, field_list)
@@ -41,6 +43,8 @@
     particles_per_grid1 = [len(grid["particle_position_x"]) for grid in pf.h.grids]
     yield assert_equal, particles_per_grid1, particles1.NumberOfParticles
 
+    tags = np.concatenate([grid["particle_index"] for grid in pf.h.grids])
+    assert(np.unique(tags).size == num_particles)
     # Set up a lattice of particles
     pdims = np.array([64,64,64])
     def new_indices() :
@@ -48,31 +52,33 @@
         return np.arange((np.product(pdims)))+num_particles
     le = np.array([0.25,0.25,0.25])
     re = np.array([0.75,0.75,0.75])
-    new_field_list = field_list + ["particle_gas_temperature"]
-    new_field_dict = {"Density": "particle_gas_density",
-                      "Temperature": "particle_gas_temperature"}
+    new_field_list = field_list + [("io", "particle_gas_temperature")]
+    new_field_dict = {("gas", "Density"): ("io", "particle_gas_density"),
+                      ("gas", "Temperature"): ("io", "particle_gas_temperature")}
 
     particles2 = LatticeParticleGenerator(pf, pdims, le, re, new_field_list)
     particles2.assign_indices(function=new_indices)
     particles2.map_grid_fields_to_particles(new_field_dict)
 
     #Test lattice positions
-    xpos = np.unique(particles2["particle_position_x"])
-    ypos = np.unique(particles2["particle_position_y"])
-    zpos = np.unique(particles2["particle_position_z"])
+    xpos = np.unique(particles2["io", "particle_position_x"])
+    ypos = np.unique(particles2["io", "particle_position_y"])
+    zpos = np.unique(particles2["io", "particle_position_z"])
 
     xpred = np.linspace(le[0],re[0],num=pdims[0],endpoint=True)
     ypred = np.linspace(le[1],re[1],num=pdims[1],endpoint=True)
     zpred = np.linspace(le[2],re[2],num=pdims[2],endpoint=True)
 
-    yield assert_almost_equal, xpos, xpred
-    yield assert_almost_equal, ypos, ypred
-    yield assert_almost_equal, zpos, zpred
+    assert_almost_equal( xpos, xpred)
+    assert_almost_equal( ypos, ypred)
+    assert_almost_equal( zpos, zpred)
 
     #Test the number of particles again
     particles2.apply_to_stream()
     particles_per_grid2 = [grid.NumberOfParticles for grid in pf.h.grids]
     yield assert_equal, particles_per_grid2, particles1.NumberOfParticles+particles2.NumberOfParticles
+
+    [grid.field_data.clear() for grid in pf.h.grids]
     particles_per_grid2 = [len(grid["particle_position_x"]) for grid in pf.h.grids]
     yield assert_equal, particles_per_grid2, particles1.NumberOfParticles+particles2.NumberOfParticles
 

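The [grid.field_data.clear() ...] line above is a list comprehension used purely for its side effect: it empties each grid's field cache so the second particle count comes from a fresh IO read rather than arrays cached before apply_to_stream. The plain-loop equivalent:

    for grid in pf.h.grids:
        grid.field_data.clear()  # drop cached arrays; next access re-reads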
diff -r 8249a2890da54d3677a3c472598f59f282415a64 -r bc9faeefee77acc1e0a042b20da35b3bf26b4d82 yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -119,7 +119,7 @@
     
     Parameters
     ----------
-    data_source : AMR4DData Object
+    data_source : AMR3DData Object
         The data object to be profiled, such as all_data, region, or 
         sphere.
     x_field : str
@@ -246,13 +246,18 @@
             name = "%s.png" % prefix
         suffix = get_image_suffix(name)
         prefix = name[:name.rfind(suffix)]
+        xfn = self.profiles[0].x_field
+        if isinstance(xfn, types.TupleType):
+            xfn = xfn[1]
         if not suffix:
             suffix = ".png"
         canvas_cls = get_canvas(name)
         for uid, fig in iters:
+            if isinstance(uid, types.TupleType):
+                uid = uid[1]
             canvas = canvas_cls(fig)
             fn = "%s_1d-Profile_%s_%s%s" % \
-              (prefix, self.profiles[0].x_field, uid, suffix)
+              (prefix, xfn, uid, suffix)
             mylog.info("Saving %s", fn)
             canvas.print_figure(fn)
         return self
@@ -410,7 +415,8 @@
             
     def _get_field_log(self, field_y, profile):
         pf = profile.data_source.pf
-        yfi = pf.field_info[field_y]
+        yf, = profile.data_source._determine_fields([field_y])
+        yfi = pf._get_field_info(*yf)
         if self.x_log is None:
             x_log = profile.x_log
         else:
@@ -425,6 +431,7 @@
     def _get_field_label(self, field, field_info):
         units = field_info.get_units()
         field_name = field_info.display_name
+        if isinstance(field, tuple): field = field[1]
         if field_name is None:
             field_name = r'$\rm{'+field+r'}$'
         elif field_name.find('$') == -1:
@@ -438,8 +445,10 @@
     def _get_field_title(self, field_y, profile):
         pf = profile.data_source.pf
         field_x = profile.x_field
-        xfi = pf.field_info[field_x]
-        yfi = pf.field_info[field_y]
+        xf, yf = profile.data_source._determine_fields(
+            [field_x, field_y])
+        xfi = pf._get_field_info(*xf)
+        yfi = pf._get_field_info(*yf)
         x_title = self.x_title or self._get_field_label(field_x, xfi)
         y_title = self.y_title.get(field_y, None) or \
                     self._get_field_label(field_y, yfi)
@@ -554,9 +563,11 @@
         pf = profile.data_source.pf
         field_x = profile.x_field
         field_y = profile.y_field
-        xfi = pf.field_info[field_x]
-        yfi = pf.field_info[field_y]
-        zfi = pf.field_info[field_z]
+        xf, yf, zf = profile.data_source._determine_fields(
+            [field_x, field_y, field_z])
+        xfi = pf._get_field_info(*xf)
+        yfi = pf._get_field_info(*yf)
+        zfi = pf._get_field_info(*zf)
         x_title = self.x_title or self._get_field_label(field_x, xfi)
         y_title = self.y_title or self._get_field_label(field_y, yfi)
         z_title = self.z_title.get(field_z, None) or \
@@ -566,6 +577,7 @@
     def _get_field_label(self, field, field_info):
         units = field_info.get_units()
         field_name = field_info.display_name
+        if isinstance(field, tuple): field = field[1]
         if field_name is None:
             field_name = r'$\rm{'+field+r'}$'
         elif field_name.find('$') == -1:
@@ -578,7 +590,8 @@
         
     def _get_field_log(self, field_z, profile):
         pf = profile.data_source.pf
-        zfi = pf.field_info[field_z]
+        zf, = profile.data_source._determine_fields([field_z])
+        zfi = pf._get_field_info(*zf)
         if self.x_log is None:
             x_log = profile.x_log
         else:
@@ -659,9 +672,16 @@
 
         if not self._plot_valid: self._setup_plots()
         if mpl_kwargs is None: mpl_kwargs = {}
+        xfn = self.profile.x_field
+        yfn = self.profile.y_field
+        if isinstance(xfn, types.TupleType):
+            xfn = xfn[1]
+        if isinstance(yfn, types.TupleType):
+            yfn = yfn[1]
         for f in self.profile.field_data:
-            middle = "2d-Profile_%s_%s_%s" % (self.profile.x_field, 
-                                              self.profile.y_field, f)
+            _f = f
+            if isinstance(f, types.TupleType): _f = _f[1]
+            middle = "2d-Profile_%s_%s_%s" % (xfn, yfn, _f)
             if name is None:
                 prefix = self.profile.pf
                 name = "%s.png" % prefix

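The save paths above repeat one guard several times: tuple field names are reduced to their fname component before being spliced into output filenames. The pattern in isolation (Python 2 idiom, matching the types.TupleType checks in the diff; helper name illustrative):

    import types

    def strip_ftype(field):
        # keep only fname from ("ftype", "fname"); strings pass through
        if isinstance(field, types.TupleType):
            return field[1]
        return field

    print(strip_ftype(("gas", "Density")))   # -> Density
    print(strip_ftype("Temperature"))        # -> Temperature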
Repository URL: https://bitbucket.org/yt_analysis/yt/
