[yt-svn] commit/yt: 30 new changesets

commits-noreply at bitbucket.org
Thu May 25 14:18:43 PDT 2017


30 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/cf8d83027094/
Changeset:   cf8d83027094
Branch:      yt
User:        jzuhone
Date:        2017-04-05 05:05:37+00:00
Summary:     Whitespace nits. Beginning to add the capability for multiple particle types to stream datasets.
Affected #:  1 file

diff -r 4409c8396c9ad1eafd15f08052aec9b2f2682a0a -r cf8d83027094133e266424e5816a699579f9e12b yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -150,11 +150,11 @@
     def get_fields(self):
         return self.fields.all_fields
 
-    def get_particle_type(self, field) :
+    def get_particle_type(self, field):
 
-        if field in self.particle_types :
+        if field in self.particle_types:
             return self.particle_types[field]
-        else :
+        else:
             return False
 
 class StreamHierarchy(GridIndex):
@@ -252,7 +252,7 @@
         else:
             self.io = io_registry[self.dataset_type](self.ds)
 
-    def update_data(self, data, units = None):
+    def update_data(self, data, units=None):
 
         """
         Update the stream data with a new data dict. If fields already exist,
@@ -266,20 +266,20 @@
         particle_types = set_particle_types(data[0])
         ftype = "io"
 
-        for key in data[0].keys() :
+        for key in data[0].keys():
             if key is "number_of_particles": continue
             self.stream_handler.particle_types[key] = particle_types[key]
 
-        for i, grid in enumerate(self.grids) :
-            if "number_of_particles" in data[i] :
+        for i, grid in enumerate(self.grids):
+            if "number_of_particles" in data[i]:
                 grid.NumberOfParticles = data[i].pop("number_of_particles")
             for fname in data[i]:
                 if fname in grid.field_data:
                     grid.field_data.pop(fname, None)
                 elif (ftype, fname) in grid.field_data:
-                    grid.field_data.pop( ("io", fname) )
+                    grid.field_data.pop((ftype, fname))
                 self.stream_handler.fields[grid.id][fname] = data[i][fname]
-            
+
         # We only want to create a superset of fields here.
         self._detect_output_fields()
         self.ds.create_field_info()
@@ -303,7 +303,13 @@
         self.fluid_types += ("stream",)
         self.geometry = geometry
         self.stream_handler = stream_handler
-        name = "InMemoryParameterFile_%s" % (uuid.uuid4().hex)
+        particle_types = []
+        for k, v in self.stream_handler.particle_types.items():
+            if v:
+                particle_types.append(k[0])
+        self.particle_types = tuple(set(particle_types))
+        self.particle_types_raw = self.particle_types
+        name = "InMemoryParameterFile_%s" % uuid.uuid4().hex
         from yt.data_objects.static_output import _cached_datasets
         _cached_datasets[name] = self
         Dataset.__init__(self, name, self._dataset_type,
@@ -1116,7 +1122,6 @@
     data = pdata # Drop reference count
     update_field_names(data)
     particle_types = set_particle_types(data)
-
     sfh.update({'stream_file':data})
     grid_left_edges = domain_left_edge
     grid_right_edges = domain_right_edge


https://bitbucket.org/yt_analysis/yt/commits/7e0136b96766/
Changeset:   7e0136b96766
Branch:      yt
User:        jzuhone
Date:        2017-04-05 05:16:41+00:00
Summary:     A couple of whitespace nits
Affected #:  1 file

diff -r cf8d83027094133e266424e5816a699579f9e12b -r 7e0136b96766b870416d8bc6f4ca0b065f96b1a6 yt/frontends/stream/tests/test_update_data.py
--- a/yt/frontends/stream/tests/test_update_data.py
+++ b/yt/frontends/stream/tests/test_update_data.py
@@ -2,7 +2,7 @@
 from yt.data_objects.profiles import create_profile
 from numpy.random import uniform
 
-def test_update_data() :
+def test_update_data():
     ds = fake_random_ds(64, nprocs=8)
     ds.index
     dims = (32,32,32)
@@ -13,4 +13,4 @@
     prj["temperature"]
     dd = ds.all_data()
     profile = create_profile(dd, "density", "temperature", 10)
-    profile["temperature"]                             
+    profile["temperature"]


https://bitbucket.org/yt_analysis/yt/commits/b74543454a28/
Changeset:   b74543454a28
Branch:      yt
User:        jzuhone
Date:        2017-04-11 14:44:39+00:00
Summary:     Don’t add particle fields as fluid fields
Affected #:  1 file

diff -r 7e0136b96766b870416d8bc6f4ca0b065f96b1a6 -r b74543454a2827ae3bdc4d3fab80cfa6a4b0244f yt/frontends/stream/fields.py
--- a/yt/frontends/stream/fields.py
+++ b/yt/frontends/stream/fields.py
@@ -74,8 +74,11 @@
         from yt.fields.magnetic_field import \
             setup_magnetic_field_aliases
         for field in self.ds.stream_handler.field_units:
+            if field[0] in self.ds.particle_types:
+                continue
             units = self.ds.stream_handler.field_units[field]
-            if units != '': self.add_output_field(field, sampling_type="cell", units=units)
+            if units != '': 
+                self.add_output_field(field, sampling_type="cell", units=units)
         setup_magnetic_field_aliases(self, "stream", ["magnetic_field_%s" % ax for ax in "xyz"])
 
     def add_output_field(self, name, sampling_type, **kwargs):
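
The effect of the new guard, shown as a standalone sketch with hypothetical inputs (only the fluid fields survive the filter):

    particle_types = ("dm", "star")
    field_units = {("gas", "density"): "g/cm**3",
                   ("dm", "particle_mass"): "Msun"}
    fluid_fields = [f for f in field_units if f[0] not in particle_types]
    # fluid_fields == [("gas", "density")]; particle fields are skipped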


https://bitbucket.org/yt_analysis/yt/commits/31b951b50db2/
Changeset:   31b951b50db2
User:        jzuhone
Date:        2017-05-09 21:47:10+00:00
Summary:     Simplify update_data and make sure that it handles all field types generically. Whitespace.
Affected #:  2 files

diff -r b74543454a2827ae3bdc4d3fab80cfa6a4b0244f -r 31b951b50db2f7d9b1fdbcccebd36d74efd3c0e1 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -252,7 +252,7 @@
         else:
             self.io = io_registry[self.dataset_type](self.ds)
 
-    def update_data(self, data, units=None):
+    def update_data(self, data):
 
         """
         Update the stream data with a new data dict. If fields already exist,
@@ -260,25 +260,22 @@
         already in the stream but not part of the data dict will be left
         alone. 
         """
-        [update_field_names(d) for d in data]
-        if units is not None:
-            self.stream_handler.field_units.update(units)
         particle_types = set_particle_types(data[0])
-        ftype = "io"
 
         for key in data[0].keys():
-            if key is "number_of_particles": continue
+            if key is "number_of_particles":
+                continue
             self.stream_handler.particle_types[key] = particle_types[key]
 
         for i, grid in enumerate(self.grids):
             if "number_of_particles" in data[i]:
                 grid.NumberOfParticles = data[i].pop("number_of_particles")
-            for fname in data[i]:
-                if fname in grid.field_data:
-                    grid.field_data.pop(fname, None)
-                elif (ftype, fname) in grid.field_data:
-                    grid.field_data.pop((ftype, fname))
-                self.stream_handler.fields[grid.id][fname] = data[i][fname]
+            field_units, gdata = unitify_data(data[i])
+            self.stream_handler.field_units.update(field_units)
+            for field in gdata:
+                if field in grid.field_data:
+                    grid.field_data.pop(field, None)
+                self.stream_handler.fields[grid.id][field] = gdata[field]
 
         # We only want to create a superset of fields here.
         self._detect_output_fields()
@@ -451,10 +448,10 @@
         particle_grid_count = np.bincount(particle_grid_inds.astype("intp"),
                                           minlength=num_grids)
         particle_indices = np.zeros(num_grids + 1, dtype='int64')
-        if num_grids > 1 :
+        if num_grids > 1:
             np.add.accumulate(particle_grid_count.squeeze(),
                               out=particle_indices[1:])
-        else :
+        else:
             particle_indices[1] = particle_grid_count.squeeze()
 
         pdata.pop("number_of_particles", None)
@@ -468,7 +465,7 @@
                 grid[key] = pdata[key][idxs][start:end]
             grid_pdata.append(grid)
 
-    else :
+    else:
         grid_pdata = [pdata]
 
     for pd, gi in zip(grid_pdata, sorted(ds.stream_handler.fields)):
@@ -536,7 +533,7 @@
         # overridden here.
         if any(f[0] == new_field[1] for f in known_fields) and \
            field_units[new_field] == "":
-            field_units.pop(new_field)
+               field_units.pop(new_field)
     data = new_data
     return field_units, data
 

diff -r b74543454a2827ae3bdc4d3fab80cfa6a4b0244f -r 31b951b50db2f7d9b1fdbcccebd36d74efd3c0e1 yt/frontends/stream/tests/test_update_data.py
--- a/yt/frontends/stream/tests/test_update_data.py
+++ b/yt/frontends/stream/tests/test_update_data.py
@@ -6,9 +6,9 @@
     ds = fake_random_ds(64, nprocs=8)
     ds.index
     dims = (32,32,32)
-    grid_data = [{"temperature":uniform(size=dims)}
+    grid_data = [{"temperature": uniform(size=dims)}
                  for i in range(ds.index.num_grids)]
-    ds.index.update_data(grid_data, {('gas', 'temperature'):'K'})
+    ds.index.update_data(grid_data)
     prj = ds.proj("temperature", 2)
     prj["temperature"]
     dd = ds.all_data()
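
Since the units keyword is gone, units now travel with the data itself and are pulled out per-grid by unitify_data. A minimal usage sketch mirroring the updated test (a bare array here; passing an (array, "unit") tuple for explicit units is assumed to be handled by unitify_data):

    import numpy as np
    from yt.testing import fake_random_ds

    ds = fake_random_ds(64, nprocs=8)
    ds.index  # make sure the grid index exists

    # One dict per grid; fake_random_ds(64, nprocs=8) makes eight 32^3 grids.
    grid_data = [{"temperature": np.random.uniform(size=(32, 32, 32))}
                 for _ in range(ds.index.num_grids)]
    ds.index.update_data(grid_data)

    prj = ds.proj("temperature", 2)  # the updated field is immediately usable
    prj["temperature"]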


https://bitbucket.org/yt_analysis/yt/commits/a69604958ccf/
Changeset:   a69604958ccf
User:        jzuhone
Date:        2017-05-09 21:53:33+00:00
Summary:     Whitespace and other simplification
Affected #:  1 file

diff -r 31b951b50db2f7d9b1fdbcccebd36d74efd3c0e1 -r a69604958ccf1e44e4f1247055d47842beae4dfd yt/utilities/particle_generator.py
--- a/yt/utilities/particle_generator.py
+++ b/yt/utilities/particle_generator.py
@@ -170,13 +170,13 @@
         grid_data = []
         for i,g in enumerate(self.ds.index.grids):
             data = {}
-            if clobber :
+            if clobber:
                 data["number_of_particles"] = self.NumberOfParticles[i]
-            else :
+            else:
                 data["number_of_particles"] = self.NumberOfParticles[i] + \
                                               g.NumberOfParticles
             grid_particles = self.get_for_grid(g)
-            for field in self.field_list :
+            for field in self.field_list:
                 if data["number_of_particles"] > 0:
                     # We have particles in this grid
                     if g.NumberOfParticles > 0 and not clobber:
@@ -187,9 +187,9 @@
                         else:
                             # This one doesn't, set the previous particles' field
                             # values to zero
-                            prev_particles = np.zeros((g.NumberOfParticles))
+                            prev_particles = np.zeros(g.NumberOfParticles)
                             prev_particles = self.ds.arr(prev_particles,
-                                input_units = self.field_units[field])
+                                                         self.field_units[field])
                         data[field] = uconcatenate((prev_particles,
                                                     grid_particles[field]))
                     else:


https://bitbucket.org/yt_analysis/yt/commits/84996033d2c1/
Changeset:   84996033d2c1
User:        jzuhone
Date:        2017-05-14 14:50:29+00:00
Summary:     Put this in a method and make sure that unstructured mesh datasets don’t use it
Affected #:  1 file

diff -r a69604958ccf1e44e4f1247055d47842beae4dfd -r 84996033d2c180ba8d02deedc1a3fae15155e1f8 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -300,12 +300,7 @@
         self.fluid_types += ("stream",)
         self.geometry = geometry
         self.stream_handler = stream_handler
-        particle_types = []
-        for k, v in self.stream_handler.particle_types.items():
-            if v:
-                particle_types.append(k[0])
-        self.particle_types = tuple(set(particle_types))
-        self.particle_types_raw = self.particle_types
+        self._find_particle_types()
         name = "InMemoryParameterFile_%s" % uuid.uuid4().hex
         from yt.data_objects.static_output import _cached_datasets
         _cached_datasets[name] = self
@@ -366,6 +361,14 @@
     def _skip_cache(self):
         return True
 
+    def _find_particle_types(self):
+        particle_types = []
+        for k, v in self.stream_handler.particle_types.items():
+            if v:
+                particle_types.append(k[0])
+        self.particle_types = tuple(set(particle_types))
+        self.particle_types_raw = self.particle_types
+
 class StreamDictFieldHandler(dict):
     _additional_fields = ()
 
@@ -1669,6 +1672,9 @@
     _field_info_class = StreamFieldInfo
     _dataset_type = "stream_unstructured"
 
+    def _find_particle_types(self):
+        pass
+
 def load_unstructured_mesh(connectivity, coordinates, node_data=None,
                            elem_data=None, length_unit=None, bbox=None,
                            sim_time=0.0, mass_unit=None, time_unit=None,
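
A standalone sketch of what the extracted helper computes, using a hypothetical particle_types dict of (ftype, fname) keys mapped to booleans:

    ptype_flags = {("io", "particle_position_x"): True,
                   ("io", "particle_mass"): True,
                   ("gas", "density"): False}
    particle_types = tuple({k[0] for k, v in ptype_flags.items() if v})
    # particle_types == ("io",); this becomes both ds.particle_types
    # and ds.particle_types_raw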


https://bitbucket.org/yt_analysis/yt/commits/8d275c00a91e/
Changeset:   8d275c00a91e
User:        jzuhone
Date:        2017-05-14 23:44:19+00:00
Summary:     update_data should check for particle types
Affected #:  1 file

diff -r 84996033d2c180ba8d02deedc1a3fae15155e1f8 -r 8d275c00a91e460b898678bc8b384f4e1c400f60 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -266,6 +266,7 @@
             if key is "number_of_particles":
                 continue
             self.stream_handler.particle_types[key] = particle_types[key]
+        self.ds._find_particle_types()
 
         for i, grid in enumerate(self.grids):
             if "number_of_particles" in data[i]:


https://bitbucket.org/yt_analysis/yt/commits/2ba78f0fe711/
Changeset:   2ba78f0fe711
User:        jzuhone
Date:        2017-05-14 23:44:35+00:00
Summary:     Whitespace (+1 squashed commit)
Squashed commits:
[31f14b4] whitespace
Affected #:  2 files

diff -r 8d275c00a91e460b898678bc8b384f4e1c400f60 -r 2ba78f0fe711377069a52965d566bc6edcbe216c yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -465,7 +465,7 @@
             grid["number_of_particles"] = pcount
             start = particle_indices[i]
             end = particle_indices[i+1]
-            for key in pdata.keys() :
+            for key in pdata.keys():
                 grid[key] = pdata[key][idxs][start:end]
             grid_pdata.append(grid)
 
@@ -951,7 +951,7 @@
             if not isinstance(field, tuple):
                 field = ("unknown", field)
             fi = base_ds._get_field_info(*field)
-            if fi.particle_type :
+            if fi.particle_type:
                 pdata[field] = uconcatenate([grid[field]
                                                for grid in base_ds.index.grids])
         pdata["number_of_particles"] = number_of_particles
@@ -969,10 +969,10 @@
         if callback is not None: callback(ds)
         grid_data = []
         for g in ds.index.grids:
-            gd = dict( left_edge = g.LeftEdge,
-                       right_edge = g.RightEdge,
-                       level = g.Level,
-                       dimensions = g.ActiveDimensions )
+            gd = dict(left_edge=g.LeftEdge,
+                       right_edge=g.RightEdge,
+                       level=g.Level,
+                       dimensions=g.ActiveDimensions)
             for field in ds.field_list:
                 if not isinstance(field, tuple):
                     field = ("unknown", field)
@@ -987,13 +987,13 @@
                 LE = sg.left_index * g.dds + ds.domain_left_edge
                 dims = sg.dimensions * ds.refine_by
                 grid = ds.smoothed_covering_grid(g.Level + 1, LE, dims)
-                gd = dict(left_edge = LE, right_edge = grid.right_edge,
-                          level = g.Level + 1, dimensions = dims)
+                gd = dict(left_edge=LE, right_edge=grid.right_edge,
+                          level=g.Level + 1, dimensions=dims)
                 for field in ds.field_list:
                     if not isinstance(field, tuple):
                         field = ("unknown", field)
                     fi = ds._get_field_info(*field)
-                    if not fi.particle_type :
+                    if not fi.particle_type:
                         gd[field] = grid[field]
                 grid_data.append(gd)
 

diff -r 8d275c00a91e460b898678bc8b384f4e1c400f60 -r 2ba78f0fe711377069a52965d566bc6edcbe216c yt/frontends/stream/tests/test_stream_particles.py
--- a/yt/frontends/stream/tests/test_stream_particles.py
+++ b/yt/frontends/stream/tests/test_stream_particles.py
@@ -31,7 +31,7 @@
 
     grid_data = []
 
-    for grid in amr0.index.grids :
+    for grid in amr0.index.grids:
 
         data = dict(left_edge = grid.LeftEdge,
                     right_edge = grid.RightEdge,


https://bitbucket.org/yt_analysis/yt/commits/f5b0d9dddd0b/
Changeset:   f5b0d9dddd0b
User:        jzuhone
Date:        2017-05-15 19:17:22+00:00
Summary:     Removed outdated commented lines
Affected #:  1 file

diff -r 2ba78f0fe711377069a52965d566bc6edcbe216c -r f5b0d9dddd0b00e3ecfcefc820b768c72b070806 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -294,10 +294,6 @@
 
     def __init__(self, stream_handler, storage_filename=None,
                  geometry="cartesian", unit_system="cgs"):
-        #if parameter_override is None: parameter_override = {}
-        #self._parameter_override = parameter_override
-        #if conversion_override is None: conversion_override = {}
-        #self._conversion_override = conversion_override
         self.fluid_types += ("stream",)
         self.geometry = geometry
         self.stream_handler = stream_handler


https://bitbucket.org/yt_analysis/yt/commits/7e7707cd25a9/
Changeset:   7e7707cd25a9
User:        jzuhone
Date:        2017-05-21 13:54:00+00:00
Summary:     Replace "io" with ptype where needed and iterate over particle types. Fixed some more style issues.
Affected #:  2 files

diff -r f5b0d9dddd0b00e3ecfcefc820b768c72b070806 -r 7e7707cd25a9bf288917d1a1851c37b63440e1e4 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -404,7 +404,7 @@
             particle_types[key] = False
     return particle_types
 
-def assign_particle_data(ds, pdata):
+def assign_particle_data(ds, ptype, pdata):
 
     """
     Assign particle data to the grids using MatchPointsToGrids. This
@@ -419,10 +419,10 @@
 
     if len(ds.stream_handler.fields) > 1:
 
-        if ("io", "particle_position_x") in pdata:
-            x, y, z = (pdata["io", "particle_position_%s" % ax] for ax in 'xyz')
-        elif ("io", "particle_position") in pdata:
-            x, y, z = pdata["io", "particle_position"].T
+        if (ptype, "particle_position_x") in pdata:
+            x, y, z = (pdata[ptype, "particle_position_%s" % ax] for ax in 'xyz')
+        elif (ptype, "particle_position") in pdata:
+            x, y, z = pdata[ptype, "particle_position"].T
         else:
             raise KeyError(
                 "Cannot decompose particle data without position fields!")
@@ -717,23 +717,24 @@
 
     sds = StreamDataset(handler, geometry=geometry, unit_system=unit_system)
 
-    check_fields = [("io", "particle_position_x"), ("io", "particle_position")]
-
     # Now figure out where the particles go
     if number_of_particles > 0:
-        if all(f not in pdata for f in check_fields):
-            pdata_ftype = {}
-            for f in [k for k in sorted(pdata)]:
-                if not hasattr(pdata[f], "shape"):
-                    continue
-                if f == 'number_of_particles':
-                    continue
-                mylog.debug("Reassigning '%s' to ('io','%s')", f, f)
-                pdata_ftype["io",f] = pdata.pop(f)
-            pdata_ftype.update(pdata)
-            pdata = pdata_ftype
-        # This will update the stream handler too
-        assign_particle_data(sds, pdata)
+        for ptype in sds.particle_types_raw:
+            check_fields = [(ptype, "particle_position_x"), 
+                            (ptype, "particle_position")]
+            if all(f not in pdata for f in check_fields):
+                pdata_ftype = {}
+                for f in [k for k in sorted(pdata)]:
+                    if not hasattr(pdata[f], "shape"):
+                        continue
+                    if f == 'number_of_particles':
+                        continue
+                    mylog.debug("Reassigning '%s' to ('%s','%s')", f, ptype, f)
+                    pdata_ftype[ptype, f] = pdata.pop(f)
+                pdata_ftype.update(pdata)
+                pdata = pdata_ftype
+            # This will update the stream handler too
+            assign_particle_data(sds, ptype, pdata)
 
     return sds
 
@@ -949,7 +950,7 @@
             fi = base_ds._get_field_info(*field)
             if fi.particle_type:
                 pdata[field] = uconcatenate([grid[field]
-                                               for grid in base_ds.index.grids])
+                                             for grid in base_ds.index.grids])
         pdata["number_of_particles"] = number_of_particles
 
     last_gc = base_ds.index.num_grids
@@ -996,15 +997,16 @@
         ds = load_amr_grids(grid_data, ds.domain_dimensions, bbox=bbox)
 
         if number_of_particles > 0:
-            if ("io", "particle_position_x") not in pdata:
-                pdata_ftype = {}
-                for f in [k for k in sorted(pdata)]:
-                    if not hasattr(pdata[f], "shape"): continue
-                    mylog.debug("Reassigning '%s' to ('io','%s')", f, f)
-                    pdata_ftype["io",f] = pdata.pop(f)
-                pdata_ftype.update(pdata)
-                pdata = pdata_ftype
-            assign_particle_data(ds, pdata)
+            for ptype in base_ds.particle_types_raw:
+                if (ptype, "particle_position_x") not in pdata:
+                    pdata_ftype = {}
+                    for f in [k for k in sorted(pdata)]:
+                        if not hasattr(pdata[f], "shape"): continue
+                        mylog.debug("Reassigning '%s' to ('%s','%s')", f, ptype, f)
+                        pdata_ftype[ptype, f] = pdata.pop(f)
+                    pdata_ftype.update(pdata)
+                    pdata = pdata_ftype
+                assign_particle_data(ds, ptype, pdata)
             # We need to reassign the field list here.
         cur_gc = ds.index.num_grids
 

diff -r f5b0d9dddd0b00e3ecfcefc820b768c72b070806 -r 7e7707cd25a9bf288917d1a1851c37b63440e1e4 yt/frontends/stream/tests/test_stream_particles.py
--- a/yt/frontends/stream/tests/test_stream_particles.py
+++ b/yt/frontends/stream/tests/test_stream_particles.py
@@ -15,7 +15,7 @@
     x = np.random.uniform(size=num_particles)
     y = np.random.uniform(size=num_particles)
     z = np.random.uniform(size=num_particles)
-    m = np.ones((num_particles))
+    m = np.ones(num_particles)
 
     # Field operators and cell flagging methods
 
@@ -33,11 +33,11 @@
 
     for grid in amr0.index.grids:
 
-        data = dict(left_edge = grid.LeftEdge,
-                    right_edge = grid.RightEdge,
-                    level = grid.Level,
-                    dimensions = grid.ActiveDimensions,
-                    number_of_particles = grid.NumberOfParticles)
+        data = dict(left_edge=grid.LeftEdge,
+                    right_edge=grid.RightEdge,
+                    level=grid.Level,
+                    dimensions=grid.ActiveDimensions,
+                    number_of_particles=grid.NumberOfParticles)
 
         for field in amr0.field_list:
             data[field] = grid[field]
@@ -93,14 +93,13 @@
 
     for grid in amr1.index.grids:
 
-        data = dict(left_edge = grid.LeftEdge,
-                    right_edge = grid.RightEdge,
-                    level = grid.Level,
-                    dimensions = grid.ActiveDimensions,
-                    number_of_particles = grid.NumberOfParticles)
+        data = dict(left_edge=grid.LeftEdge,
+                    right_edge=grid.RightEdge,
+                    level=grid.Level,
+                    dimensions=grid.ActiveDimensions,
+                    number_of_particles=grid.NumberOfParticles)
 
         for field in amr1.field_list:
-
             data[field] = grid[field]
 
         grid_data.append(data)
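
The re-keying step that now runs per particle type can be shown in isolation. A standalone sketch with a hypothetical pdata dict (untyped arrays are moved under the current ptype; scalars such as the particle count pass through):

    import numpy as np

    ptype = "star"
    pdata = {"particle_mass": np.ones(100),
             "number_of_particles": 100}
    pdata_ftype = {}
    for f in sorted(pdata):
        if not hasattr(pdata[f], "shape") or f == "number_of_particles":
            continue
        pdata_ftype[ptype, f] = pdata.pop(f)
    pdata_ftype.update(pdata)
    pdata = pdata_ftype
    # pdata == {("star", "particle_mass"): <array>, "number_of_particles": 100}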


https://bitbucket.org/yt_analysis/yt/commits/d5b527970aef/
Changeset:   d5b527970aef
User:        jzuhone
Date:        2017-05-21 14:10:18+00:00
Summary:     simplify assert here
Affected #:  1 file

diff -r 7e7707cd25a9bf288917d1a1851c37b63440e1e4 -r d5b527970aefe814b58c5a9753e519bb6fc4066e yt/frontends/stream/tests/test_stream_particles.py
--- a/yt/frontends/stream/tests/test_stream_particles.py
+++ b/yt/frontends/stream/tests/test_stream_particles.py
@@ -87,7 +87,7 @@
 
     amr1 = refine_amr(ug1, rc, fo, 3)
     for field in sorted(ug1.field_list):
-        assert_equal((field in amr1.field_list), True)
+        assert field in amr1.field_list
 
     grid_data = []
 


https://bitbucket.org/yt_analysis/yt/commits/e8991a9f5dee/
Changeset:   e8991a9f5dee
User:        jzuhone
Date:        2017-05-22 15:24:17+00:00
Summary:     Make sure we don’t divide by zero if the virial radius is 0
Affected #:  1 file

diff -r d5b527970aefe814b58c5a9753e519bb6fc4066e -r e8991a9f5dee2b537756b6cbf9db6c97a751d03f yt/fields/cosmology_fields.py
--- a/yt/fields/cosmology_fields.py
+++ b/yt/fields/cosmology_fields.py
@@ -107,7 +107,11 @@
     # r / r_vir
     def _virial_radius_fraction(field, data):
         virial_radius = data.get_field_parameter("virial_radius")
-        return data["radius"] / virial_radius
+        if virial_radius == 0.0:
+            ret = 0.0
+        else:
+            ret = data["radius"] / virial_radius
+        return ret
 
     registry.add_field(("index", "virial_radius_fraction"), sampling_type="cell", 
                        function=_virial_radius_fraction,
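
A quick sketch of the case this guards against, with a hypothetical dataset and the field parameter set explicitly:

    import yt

    ds = yt.load("my_dataset")  # hypothetical dataset
    sp = ds.sphere("c", (100.0, "kpc"))
    sp.set_field_parameter("virial_radius", ds.quan(0.0, "Mpc"))
    # Previously a division by zero; with this change it returns 0.0.
    sp["index", "virial_radius_fraction"]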


https://bitbucket.org/yt_analysis/yt/commits/898760af489d/
Changeset:   898760af489d
User:        jzuhone
Date:        2017-05-22 15:26:51+00:00
Summary:     Don’t add particle unions. "all" will be re-created
Affected #:  1 file

diff -r e8991a9f5dee2b537756b6cbf9db6c97a751d03f -r 898760af489d296ae67aad4a97bc44e54c6c227a yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -948,7 +948,7 @@
             if not isinstance(field, tuple):
                 field = ("unknown", field)
             fi = base_ds._get_field_info(*field)
-            if fi.particle_type:
+            if fi.particle_type and field[0] in base_ds.particle_types_raw:
                 pdata[field] = uconcatenate([grid[field]
                                              for grid in base_ds.index.grids])
         pdata["number_of_particles"] = number_of_particles


https://bitbucket.org/yt_analysis/yt/commits/f868186b3f79/
Changeset:   f868186b3f79
User:        jzuhone
Date:        2017-05-22 15:33:23+00:00
Summary:     Refactor assign_particle_data to allow for multiple particle types. This has the unfortunate side effect of removing the ability to refine on particle count in refine_amr, but I don’t think we were ever doing it
Affected #:  1 file

diff -r 898760af489d296ae67aad4a97bc44e54c6c227a -r f868186b3f79dd476d2ca8a7d69ca79160a4cdbe yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -252,6 +252,11 @@
         else:
             self.io = io_registry[self.dataset_type](self.ds)
 
+    def _reset_particle_count(self):
+        self.grid_particle_count[:] = self.stream_handler.particle_count
+        for i, grid in enumerate(self.grids):
+            grid.NumberOfParticles = self.grid_particle_count[i]
+
     def update_data(self, data):
 
         """
@@ -270,7 +275,7 @@
 
         for i, grid in enumerate(self.grids):
             if "number_of_particles" in data[i]:
-                grid.NumberOfParticles = data[i].pop("number_of_particles")
+                self.stream_handler.particle_count[i] = data[i].pop("number_of_particles")
             field_units, gdata = unitify_data(data[i])
             self.stream_handler.field_units.update(field_units)
             for field in gdata:
@@ -278,6 +283,7 @@
                     grid.field_data.pop(field, None)
                 self.stream_handler.fields[grid.id][field] = gdata[field]
 
+        self._reset_particle_count()
         # We only want to create a superset of fields here.
         self._detect_output_fields()
         self.ds.create_field_info()
@@ -404,13 +410,28 @@
             particle_types[key] = False
     return particle_types
 
-def assign_particle_data(ds, ptype, pdata):
+def assign_particle_data(ds, pdata):
 
     """
     Assign particle data to the grids using MatchPointsToGrids. This
     will overwrite any existing particle data, so be careful!
     """
 
+    for ptype in ds.particle_types_raw:
+        check_fields = [(ptype, "particle_position_x"),
+                        (ptype, "particle_position")]
+        if all(f not in pdata for f in check_fields):
+            pdata_ftype = {}
+            for f in [k for k in sorted(pdata)]:
+                if not hasattr(pdata[f], "shape"):
+                    continue
+                if f == 'number_of_particles':
+                    continue
+                mylog.debug("Reassigning '%s' to ('%s','%s')", f, ptype, f)
+                pdata_ftype[ptype, f] = pdata.pop(f)
+            pdata_ftype.update(pdata)
+            pdata = pdata_ftype
+
     # Note: what we need to do here is a bit tricky.  Because occasionally this
     # gets called before we properly handle the field detection, we cannot use
     # any information about the index.  Fortunately for us, we can generate
@@ -418,14 +439,7 @@
     # stream handler.
 
     if len(ds.stream_handler.fields) > 1:
-
-        if (ptype, "particle_position_x") in pdata:
-            x, y, z = (pdata[ptype, "particle_position_%s" % ax] for ax in 'xyz')
-        elif (ptype, "particle_position") in pdata:
-            x, y, z = pdata[ptype, "particle_position"].T
-        else:
-            raise KeyError(
-                "Cannot decompose particle data without position fields!")
+        pdata.pop("number_of_particles", None)
         num_grids = len(ds.stream_handler.fields)
         parent_ids = ds.stream_handler.parent_ids
         num_children = np.zeros(num_grids, dtype='int64')
@@ -442,34 +456,43 @@
                              ds.stream_handler.parent_ids,
                              levels, num_children)
 
-        pts = MatchPointsToGrids(grid_tree, len(x), x, y, z)
-        particle_grid_inds = pts.find_points_in_tree()
-        idxs = np.argsort(particle_grid_inds)
-        particle_grid_count = np.bincount(particle_grid_inds.astype("intp"),
-                                          minlength=num_grids)
-        particle_indices = np.zeros(num_grids + 1, dtype='int64')
-        if num_grids > 1:
-            np.add.accumulate(particle_grid_count.squeeze(),
-                              out=particle_indices[1:])
-        else:
-            particle_indices[1] = particle_grid_count.squeeze()
+        grid_pdata = []
+        for i in range(num_grids):
+            grid = {"number_of_particles": 0}
+            grid_pdata.append(grid)
 
-        pdata.pop("number_of_particles", None)
-        grid_pdata = []
-        for i, pcount in enumerate(particle_grid_count):
-            grid = {}
-            grid["number_of_particles"] = pcount
-            start = particle_indices[i]
-            end = particle_indices[i+1]
-            for key in pdata.keys():
-                grid[key] = pdata[key][idxs][start:end]
-            grid_pdata.append(grid)
+        for ptype in ds.particle_types_raw:
+            if (ptype, "particle_position_x") in pdata:
+                x, y, z = (pdata[ptype, "particle_position_%s" % ax] for ax in 'xyz')
+            elif (ptype, "particle_position") in pdata:
+                x, y, z = pdata[ptype, "particle_position"].T
+            else:
+                raise KeyError(
+                    "Cannot decompose particle data without position fields!")
+            pts = MatchPointsToGrids(grid_tree, len(x), x, y, z)
+            particle_grid_inds = pts.find_points_in_tree()
+            idxs = np.argsort(particle_grid_inds)
+            particle_grid_count = np.bincount(particle_grid_inds.astype("intp"),
+                                              minlength=num_grids)
+            particle_indices = np.zeros(num_grids + 1, dtype='int64')
+            if num_grids > 1:
+                np.add.accumulate(particle_grid_count.squeeze(),
+                                  out=particle_indices[1:])
+            else:
+                particle_indices[1] = particle_grid_count.squeeze()
+            for i, pcount in enumerate(particle_grid_count):
+                grid_pdata[i]["number_of_particles"] += pcount
+                start = particle_indices[i]
+                end = particle_indices[i+1]
+                for key in pdata.keys():
+                    grid_pdata[i][key] = pdata[key][idxs][start:end]
 
     else:
         grid_pdata = [pdata]
 
     for pd, gi in zip(grid_pdata, sorted(ds.stream_handler.fields)):
         ds.stream_handler.fields[gi].update(pd)
+        ds.stream_handler.particle_types.update(set_particle_types(pd))
         npart = ds.stream_handler.fields[gi].pop("number_of_particles", 0)
         ds.stream_handler.particle_count[gi] = npart
 
@@ -719,22 +742,8 @@
 
     # Now figure out where the particles go
     if number_of_particles > 0:
-        for ptype in sds.particle_types_raw:
-            check_fields = [(ptype, "particle_position_x"), 
-                            (ptype, "particle_position")]
-            if all(f not in pdata for f in check_fields):
-                pdata_ftype = {}
-                for f in [k for k in sorted(pdata)]:
-                    if not hasattr(pdata[f], "shape"):
-                        continue
-                    if f == 'number_of_particles':
-                        continue
-                    mylog.debug("Reassigning '%s' to ('%s','%s')", f, ptype, f)
-                    pdata_ftype[ptype, f] = pdata.pop(f)
-                pdata_ftype.update(pdata)
-                pdata = pdata_ftype
-            # This will update the stream handler too
-            assign_particle_data(sds, ptype, pdata)
+        # This will update the stream handler too
+        assign_particle_data(sds, pdata)
 
     return sds
 
@@ -974,7 +983,7 @@
                 if not isinstance(field, tuple):
                     field = ("unknown", field)
                 fi = ds._get_field_info(*field)
-                if not fi.particle_type :
+                if not fi.particle_type:
                     gd[field] = g[field]
             grid_data.append(gd)
             if g.Level < ds.index.max_level: continue
@@ -996,25 +1005,26 @@
 
         ds = load_amr_grids(grid_data, ds.domain_dimensions, bbox=bbox)
 
-        if number_of_particles > 0:
-            for ptype in base_ds.particle_types_raw:
-                if (ptype, "particle_position_x") not in pdata:
-                    pdata_ftype = {}
-                    for f in [k for k in sorted(pdata)]:
-                        if not hasattr(pdata[f], "shape"): continue
-                        mylog.debug("Reassigning '%s' to ('%s','%s')", f, ptype, f)
-                        pdata_ftype[ptype, f] = pdata.pop(f)
-                    pdata_ftype.update(pdata)
-                    pdata = pdata_ftype
-                assign_particle_data(ds, ptype, pdata)
-            # We need to reassign the field list here.
         cur_gc = ds.index.num_grids
 
+    ds.particle_types_raw = base_ds.particle_types_raw
+    ds.particle_types = ds.particle_types_raw
+
+    # Now figure out where the particles go
+    if number_of_particles > 0:
+        # This will update the stream handler too
+        assign_particle_data(ds, pdata)
+        # Because we've already used the index, we
+        # have to re-create the field info because
+        # we added particle data after the fact
+        ds.index._reset_particle_count()
+        ds.index._detect_output_fields()
+        ds.create_field_info()
+
     return ds
 
 class StreamParticleIndex(ParticleIndex):
 
-
     def __init__(self, ds, dataset_type = None):
         self.stream_handler = ds.stream_handler
         super(StreamParticleIndex, self).__init__(ds, dataset_type)
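
With this refactor a single data dict can carry several particle types, and assign_particle_data deposits each type onto the grids in turn. A condensed sketch (the full version appears in the updated test_stream_particles later in this series):

    import numpy as np
    from yt.frontends.stream.api import load_uniform_grid

    ddims = (32, 32, 32)
    n_dm, n_star = 30000, 20000
    data = {"density": np.random.random(ddims),
            "number_of_particles": n_dm + n_star}
    for ax in "xyz":
        data["dm", "particle_position_%s" % ax] = np.random.uniform(size=n_dm)
        data["star", "particle_position_%s" % ax] = np.random.uniform(size=n_star)
    data["dm", "particle_mass"] = np.ones(n_dm)
    data["star", "particle_mass"] = 2.0*np.ones(n_star)

    ds = load_uniform_grid(data, ddims, 1.0)
    # Both ptypes land on the grids; "all" is re-created as their union.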


https://bitbucket.org/yt_analysis/yt/commits/88730ceab92e/
Changeset:   88730ceab92e
User:        jzuhone
Date:        2017-05-22 17:33:25+00:00
Summary:     Bugfix
Affected #:  1 file

diff -r f868186b3f79dd476d2ca8a7d69ca79160a4cdbe -r 88730ceab92e20a993029a46ef3380eb2d5ed18f yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -255,15 +255,14 @@
     def _reset_particle_count(self):
         self.grid_particle_count[:] = self.stream_handler.particle_count
         for i, grid in enumerate(self.grids):
-            grid.NumberOfParticles = self.grid_particle_count[i]
+            grid.NumberOfParticles = self.grid_particle_count[i, 0]
 
     def update_data(self, data):
-
         """
         Update the stream data with a new data dict. If fields already exist,
         they will be replaced, but if they do not, they will be added. Fields
         already in the stream but not part of the data dict will be left
-        alone. 
+        alone.
         """
         particle_types = set_particle_types(data[0])
 


https://bitbucket.org/yt_analysis/yt/commits/a445aac64e7c/
Changeset:   a445aac64e7c
User:        jzuhone
Date:        2017-05-22 18:51:44+00:00
Summary:     Make sure we only handle the current particle type
Affected #:  1 file

diff -r 88730ceab92e20a993029a46ef3380eb2d5ed18f -r a445aac64e7c36c459ed67f9f686ee88e9ed2427 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -484,7 +484,8 @@
                 start = particle_indices[i]
                 end = particle_indices[i+1]
                 for key in pdata.keys():
-                    grid_pdata[i][key] = pdata[key][idxs][start:end]
+                    if key[0] == ptype:
+                        grid_pdata[i][key] = pdata[key][idxs][start:end]
 
     else:
         grid_pdata = [pdata]
@@ -651,6 +652,7 @@
     # First we fix our field names
     field_units, data = unitify_data(data)
 
+    """
     for field_name in data:
         fshape = data[field_name].shape
         dshape = tuple(domain_dimensions)
@@ -660,13 +662,14 @@
                    "domain_dimensions %s or number of particles %s")
             msg = msg % (fshape, field_name, dshape, pshape)
             raise RuntimeError(msg)
+    """
 
     sfh = StreamDictFieldHandler()
 
     if number_of_particles > 0:
         particle_types = set_particle_types(data)
-        pdata = {} # Used much further below.
-        pdata["number_of_particles"] = number_of_particles
+        # Used much further below.
+        pdata = {"number_of_particles": number_of_particles}
         for key in list(data.keys()):
             if len(data[key].shape) == 1 or key[0] == 'io':
                 if not isinstance(key, tuple):


https://bitbucket.org/yt_analysis/yt/commits/447bc08f7c53/
Changeset:   447bc08f7c53
User:        jzuhone
Date:        2017-05-22 19:02:15+00:00
Summary:     Restore check for fluid fields
Affected #:  1 file

diff -r a445aac64e7c36c459ed67f9f686ee88e9ed2427 -r 447bc08f7c53ef02c420be556360e9680ea1e43b yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -652,17 +652,14 @@
     # First we fix our field names
     field_units, data = unitify_data(data)
 
-    """
+    dshape = tuple(domain_dimensions)
     for field_name in data:
         fshape = data[field_name].shape
-        dshape = tuple(domain_dimensions)
-        pshape = (number_of_particles, )
-        if fshape != dshape and fshape != pshape:
+        if len(fshape) == 3 and fshape != dshape:
             msg = ("Input data shape %s for field %s does not match provided "
-                   "domain_dimensions %s or number of particles %s")
-            msg = msg % (fshape, field_name, dshape, pshape)
+                   "domain_dimensions %s!")
+            msg = msg % (fshape, field_name, dshape)
             raise RuntimeError(msg)
-    """
 
     sfh = StreamDictFieldHandler()
 


https://bitbucket.org/yt_analysis/yt/commits/816f59e4d297/
Changeset:   816f59e4d297
User:        jzuhone
Date:        2017-05-22 19:02:23+00:00
Summary:     Update the test
Affected #:  1 file

diff -r 447bc08f7c53ef02c420be556360e9680ea1e43b -r 816f59e4d297eafaf5cc4933cabb095632a688a2 yt/frontends/stream/tests/test_stream_particles.py
--- a/yt/frontends/stream/tests/test_stream_particles.py
+++ b/yt/frontends/stream/tests/test_stream_particles.py
@@ -67,6 +67,12 @@
     assert_equal(number_of_particles1, num_particles)
     assert_equal(number_of_particles1, number_of_particles2)
 
+    for grid in ug2.index.grids:
+        tot_parts = grid["io","particle_position_x"].size
+        tot_all_parts = grid["all","particle_position_x"].size
+        assert tot_parts == grid.NumberOfParticles
+        assert tot_all_parts == grid.NumberOfParticles
+
     # Check to make sure the fields have been defined correctly
 
     for ptype in ("all", "io"):
@@ -114,6 +120,18 @@
     assert_equal(np.sum(number_of_particles1), num_particles)
     assert_equal(number_of_particles1, number_of_particles2)
 
+    for grid in amr1.index.grids:
+        tot_parts = grid["io","particle_position_x"].size
+        tot_all_parts = grid["all","particle_position_x"].size
+        assert tot_parts == grid.NumberOfParticles
+        assert tot_all_parts == grid.NumberOfParticles
+
+    for grid in amr2.index.grids:
+        tot_parts = grid["io","particle_position_x"].size
+        tot_all_parts = grid["all","particle_position_x"].size
+        assert tot_parts == grid.NumberOfParticles
+        assert tot_all_parts == grid.NumberOfParticles
+
     assert amr1._get_field_info("all", "particle_position_x").particle_type
     assert amr1._get_field_info("all", "particle_position_y").particle_type
     assert amr1._get_field_info("all", "particle_position_z").particle_type
@@ -125,3 +143,118 @@
     assert amr2._get_field_info("all", "particle_position_z").particle_type
     assert amr2._get_field_info("all", "particle_mass").particle_type
     assert not amr2._get_field_info("gas", "density").particle_type
+
+    # Now perform similar checks, but with multiple particle types
+
+    num_dm_particles = 30000
+    xd = np.random.uniform(size=num_dm_particles)
+    yd = np.random.uniform(size=num_dm_particles)
+    zd = np.random.uniform(size=num_dm_particles)
+    md = np.ones(num_dm_particles)
+
+    num_star_particles = 20000
+    xs = np.random.uniform(size=num_star_particles)
+    ys = np.random.uniform(size=num_star_particles)
+    zs = np.random.uniform(size=num_star_particles)
+    ms = 2.0*np.ones(num_star_particles)
+
+    dens = np.random.random(domain_dims)
+
+    fields3 = {"density": dens,
+               ("dm", "particle_position_x"): xd,
+               ("dm", "particle_position_y"): yd,
+               ("dm", "particle_position_z"): zd,
+               ("dm", "particle_mass"): md,
+               ("star", "particle_position_x"): xs,
+               ("star", "particle_position_y"): ys,
+               ("star", "particle_position_z"): zs,
+               ("star", "particle_mass"): ms,
+               "number_of_particles": num_dm_particles+num_star_particles}
+
+    fields4 = fields3.copy()
+
+    ug3 = load_uniform_grid(fields3, domain_dims, 1.0)
+    ug4 = load_uniform_grid(fields4, domain_dims, 1.0, nprocs=8)
+
+    # Check to make sure the number of particles is the same
+
+    number_of_particles3 = np.sum([grid.NumberOfParticles for grid in ug3.index.grids])
+    number_of_particles4 = np.sum([grid.NumberOfParticles for grid in ug4.index.grids])
+
+    assert_equal(number_of_particles3, num_dm_particles+num_star_particles)
+    assert_equal(number_of_particles3, number_of_particles4)
+
+    for grid in ug4.index.grids:
+        tot_parts = grid["dm","particle_position_x"].size
+        tot_parts += grid["star","particle_position_x"].size
+        tot_all_parts = grid["all","particle_position_x"].size
+        assert tot_parts == grid.NumberOfParticles
+        assert tot_all_parts == grid.NumberOfParticles
+
+    # Check to make sure the fields have been defined correctly
+
+    for ptype in ("dm", "star"):
+        assert ug3._get_field_info(ptype, "particle_position_x").particle_type
+        assert ug3._get_field_info(ptype, "particle_position_y").particle_type
+        assert ug3._get_field_info(ptype, "particle_position_z").particle_type
+        assert ug3._get_field_info(ptype, "particle_mass").particle_type
+        assert ug4._get_field_info(ptype, "particle_position_x").particle_type
+        assert ug4._get_field_info(ptype, "particle_position_y").particle_type
+        assert ug4._get_field_info(ptype, "particle_position_z").particle_type
+        assert ug4._get_field_info(ptype, "particle_mass").particle_type
+
+    # Now refine this
+
+    amr3 = refine_amr(ug3, rc, fo, 3)
+    for field in sorted(ug3.field_list):
+        assert field in amr3.field_list
+
+    grid_data = []
+
+    for grid in amr3.index.grids:
+
+        data = dict(left_edge=grid.LeftEdge,
+                    right_edge=grid.RightEdge,
+                    level=grid.Level,
+                    dimensions=grid.ActiveDimensions,
+                    number_of_particles=grid.NumberOfParticles)
+
+        for field in amr3.field_list:
+            data[field] = grid[field]
+
+        grid_data.append(data)
+
+    amr4 = load_amr_grids(grid_data, domain_dims)
+
+    # Check everything again
+
+    number_of_particles3 = [grid.NumberOfParticles for grid in amr3.index.grids]
+    number_of_particles4 = [grid.NumberOfParticles for grid in amr4.index.grids]
+
+    assert_equal(np.sum(number_of_particles3), num_star_particles+num_dm_particles)
+    assert_equal(number_of_particles3, number_of_particles4)
+
+    for ptype in ("dm", "star"):
+        assert amr3._get_field_info(ptype, "particle_position_x").particle_type
+        assert amr3._get_field_info(ptype, "particle_position_y").particle_type
+        assert amr3._get_field_info(ptype, "particle_position_z").particle_type
+        assert amr3._get_field_info(ptype, "particle_mass").particle_type
+        assert amr4._get_field_info(ptype, "particle_position_x").particle_type
+        assert amr4._get_field_info(ptype, "particle_position_y").particle_type
+        assert amr4._get_field_info(ptype, "particle_position_z").particle_type
+        assert amr4._get_field_info(ptype, "particle_mass").particle_type
+
+    for grid in amr3.index.grids:
+        tot_parts = grid["dm","particle_position_x"].size
+        tot_parts += grid["star","particle_position_x"].size
+        tot_all_parts = grid["all","particle_position_x"].size
+        assert tot_parts == grid.NumberOfParticles
+        assert tot_all_parts == grid.NumberOfParticles
+
+    for grid in amr4.index.grids:
+        tot_parts = grid["dm", "particle_position_x"].size
+        tot_parts += grid["star", "particle_position_x"].size
+        tot_all_parts = grid["all", "particle_position_x"].size
+        assert tot_parts == grid.NumberOfParticles
+        assert tot_all_parts == grid.NumberOfParticles
+


https://bitbucket.org/yt_analysis/yt/commits/4472d64b7d39/
Changeset:   4472d64b7d39
User:        jzuhone
Date:        2017-05-22 20:07:46+00:00
Summary:     A test for load_particles and multiple types
Affected #:  1 file

diff -r 816f59e4d297eafaf5cc4933cabb095632a688a2 -r 4472d64b7d39ab6a9539021d0dfc1f27eb18d396 yt/frontends/stream/tests/test_stream_particles.py
--- a/yt/frontends/stream/tests/test_stream_particles.py
+++ b/yt/frontends/stream/tests/test_stream_particles.py
@@ -2,7 +2,10 @@
 
 from yt.testing import \
     assert_equal
-from yt.frontends.stream.api import load_uniform_grid, refine_amr, load_amr_grids
+from yt.frontends.stream.api import load_uniform_grid, \
+    refine_amr, \
+    load_amr_grids, \
+    load_particles
 import yt.utilities.initial_conditions as ic
 import yt.utilities.flagging_methods as fm
 
@@ -258,3 +261,49 @@
         assert tot_parts == grid.NumberOfParticles
         assert tot_all_parts == grid.NumberOfParticles
 
+def test_load_particles_types():
+
+    num_particles = 100000
+
+    data1 = {"particle_position_x": np.random.random(size=num_particles),
+             "particle_position_y": np.random.random(size=num_particles),
+             "particle_position_z": np.random.random(size=num_particles),
+             "particle_mass": np.ones(num_particles)}
+
+    ds1 = load_particles(data1)
+    ds1.index
+
+    assert set(ds1.particle_types) == {"all", "io"}
+
+    dd = ds1.all_data()
+
+    for ax in "xyz":
+        assert dd["io", "particle_position_%s" % ax].size == num_particles
+        assert dd["all", "particle_position_%s" % ax].size == num_particles
+
+    num_dm_particles = 10000
+    num_star_particles = 50000
+    num_tot_particles = num_dm_particles + num_star_particles
+
+    data2 = {("dm", "particle_position_x"): np.random.random(size=num_dm_particles),
+             ("dm", "particle_position_y"): np.random.random(size=num_dm_particles),
+             ("dm", "particle_position_z"): np.random.random(size=num_dm_particles),
+             ("dm", "particle_mass"): np.ones(num_dm_particles),
+             ("star", "particle_position_x"): np.random.random(size=num_star_particles),
+             ("star", "particle_position_y"): np.random.random(size=num_star_particles),
+             ("star", "particle_position_z"): np.random.random(size=num_star_particles),
+             ("star", "particle_mass"): 2.0*np.ones(num_star_particles)}
+
+    ds2 = load_particles(data2)
+    ds2.index
+
+    assert set(ds2.particle_types) == {"all", "star", "dm"}
+
+    dd = ds2.all_data()
+
+    for ax in "xyz":
+        npart = 0
+        for ptype in ds2.particle_types_raw:
+            npart += dd[ptype, "particle_position_%s" % ax].size
+        assert npart == num_tot_particles
+        assert dd["all", "particle_position_%s" % ax].size == num_tot_particles


https://bitbucket.org/yt_analysis/yt/commits/21c93505a53d/
Changeset:   21c93505a53d
User:        jzuhone
Date:        2017-05-22 20:24:06+00:00
Summary:     Let particle generators use a different ptype
Affected #:  1 file

diff -r 4472d64b7d39ab6a9539021d0dfc1f27eb18d396 -r 21c93505a53d510183177f10ed781cb16971fcc8 yt/utilities/particle_generator.py
--- a/yt/utilities/particle_generator.py
+++ b/yt/utilities/particle_generator.py
@@ -7,11 +7,7 @@
 
 class ParticleGenerator(object):
 
-    default_fields = [("io", "particle_position_x"),
-                      ("io", "particle_position_y"),
-                      ("io", "particle_position_z")]
-    
-    def __init__(self, ds, num_particles, field_list):
+    def __init__(self, ds, num_particles, field_list, ptype="io"):
         """
         Base class for generating particle fields which may be applied to
         streams. Normally this would not be called directly, since it doesn't
@@ -21,14 +17,17 @@
         """
         self.ds = ds
         self.num_particles = num_particles
-        self.field_list = [("io",fd) if isinstance(fd,string_types) else fd
+        self.field_list = [(ptype, fd) if isinstance(fd,string_types) else fd
                            for fd in field_list]
-        self.field_list.append(("io", "particle_index"))
+        self.field_list.append((ptype, "particle_index"))
         self.field_units = dict(
-          (('io', 'particle_position_%s' % ax), 'code_length')
+          ((ptype, 'particle_position_%s' % ax), 'code_length')
           for ax in 'xyz')
-        self.field_units['io', 'particle_index'] = ''
-        
+        self.field_units[ptype, 'particle_index'] = ''
+        self.ptype = ptype
+
+        self._set_default_fields()
+
         try:
             self.posx_index = self.field_list.index(self.default_fields[0])
             self.posy_index = self.field_list.index(self.default_fields[1])
@@ -36,35 +35,40 @@
         except:
             raise KeyError("You must specify position fields: " +
                            " ".join(["particle_position_%s" % ax for ax in "xyz"]))
-        self.index_index = self.field_list.index(("io", "particle_index"))
-        
+        self.index_index = self.field_list.index((ptype, "particle_index"))
+
         self.num_grids = self.ds.index.num_grids
-        self.NumberOfParticles = np.zeros((self.num_grids), dtype='int64')
+        self.NumberOfParticles = np.zeros(self.num_grids, dtype='int64')
         self.ParticleGridIndices = np.zeros(self.num_grids + 1, dtype='int64')
-        
+
         self.num_fields = len(self.field_list)
-        
+
         self.particles = np.zeros((self.num_particles, self.num_fields),
                                   dtype='float64')
 
+    def _set_default_fields(self):
+        self.default_fields = [(self.ptype, "particle_position_x"),
+                               (self.ptype, "particle_position_y"),
+                               (self.ptype, "particle_position_z")]
+
     def has_key(self, key):
         """
         Check to see if *key* is in the particle field list.
         """
         return key in self.field_list
-            
+
     def keys(self):
         """
         Return the list of particle fields.
         """
         return self.field_list
-    
+
     def __getitem__(self, key):
         """
         Get the field associated with key.
         """
         return self.particles[:,self.field_list.index(key)]
-    
+
     def __setitem__(self, key, val):
         """
         Sets a field to be some other value. Note that we assume
@@ -72,7 +76,7 @@
         make sure the setting of the field is consistent with this.
         """
         self.particles[:,self.field_list.index(key)] = val[:]
-                
+
     def __len__(self):
         """
         The number of particles
@@ -95,17 +99,17 @@
             else:
                 tr[field] = self.particles[start:end, fi]
         return tr
-    
-    def _setup_particles(self,x,y,z,setup_fields=None):
+
+    def _setup_particles(self, x, y, z, setup_fields=None):
         """
         Assigns grids to particles and sets up particle positions. *setup_fields* is
         a dict of fields other than the particle positions to set up. 
         """
-        particle_grids, particle_grid_inds = self.ds.index._find_points(x,y,z)
+        particle_grids, particle_grid_inds = self.ds.index._find_points(x, y, z)
         idxs = np.argsort(particle_grid_inds)
-        self.particles[:,self.posx_index] = x[idxs]
-        self.particles[:,self.posy_index] = y[idxs]
-        self.particles[:,self.posz_index] = z[idxs]
+        self.particles[:, self.posx_index] = x[idxs]
+        self.particles[:, self.posy_index] = y[idxs]
+        self.particles[:, self.posz_index] = z[idxs]
         self.NumberOfParticles = np.bincount(particle_grid_inds.astype("intp"),
                                              minlength=self.num_grids)
         if self.num_grids > 1:
@@ -115,20 +119,20 @@
             self.ParticleGridIndices[1] = self.NumberOfParticles.squeeze()
         if setup_fields is not None:
             for key, value in setup_fields.items():
-                field = ("io",key) if isinstance(key, string_types) else key
+                field = (self.ptype, key) if isinstance(key, string_types) else key
                 if field not in self.default_fields:
                     self.particles[:,self.field_list.index(field)] = value[idxs]
-    
+
     def assign_indices(self, function=None, **kwargs):
         """
         Assign unique indices to the particles. The default is to just use
         numpy.arange, but any function may be supplied with keyword arguments.
         """
-        if function is None :
-            self.particles[:,self.index_index] = np.arange((self.num_particles))
-        else :
-            self.particles[:,self.index_index] = function(**kwargs)
-            
+        if function is None:
+            self.particles[:, self.index_index] = np.arange(self.num_particles)
+        else:
+            self.particles[:, self.index_index] = function(**kwargs)
+
     def map_grid_fields_to_particles(self, mapping_dict):
         r"""
         For the fields in  *mapping_dict*, map grid fields to the particles
@@ -203,7 +207,7 @@
 
 class FromListParticleGenerator(ParticleGenerator):
 
-    def __init__(self, ds, num_particles, data):
+    def __init__(self, ds, num_particles, data, ptype="io"):
         r"""
         Generate particle fields from array-like lists contained in a dict.
 
@@ -233,10 +237,10 @@
             x = data.pop("particle_position_x")
             y = data.pop("particle_position_y")
             z = data.pop("particle_position_z")
-        elif ("io","particle_position_x") in data:
-            x = data.pop(("io", "particle_position_x"))
-            y = data.pop(("io", "particle_position_y"))
-            z = data.pop(("io", "particle_position_z"))
+        elif (ptype,"particle_position_x") in data:
+            x = data.pop((ptype, "particle_position_x"))
+            y = data.pop((ptype, "particle_position_y"))
+            z = data.pop((ptype, "particle_position_z"))
 
         xcond = np.logical_or(x < ds.domain_left_edge[0],
                               x >= ds.domain_right_edge[0])
@@ -250,13 +254,14 @@
         if np.any(cond):
             raise ValueError("Some particles are outside of the domain!!!")
 
-        ParticleGenerator.__init__(self, ds, num_particles, field_list)
-        self._setup_particles(x,y,z,setup_fields=data)
-        
+        super(FromListParticleGenerator, self).__init__(ds, num_particles, 
+                                                        field_list, ptype=ptype)
+        self._setup_particles(x, y, z, setup_fields=data)
+
 class LatticeParticleGenerator(ParticleGenerator):
 
     def __init__(self, ds, particles_dims, particles_left_edge,
-                 particles_right_edge, field_list):
+                 particles_right_edge, field_list, ptype="io"):
         r"""
         Generate particles in a lattice arrangement. 
 
@@ -272,7 +277,7 @@
              The 'right-most' ending positions of the lattice.
         field_list : list of strings
              A list of particle fields
-             
+
         Examples
         --------
         >>> dims = (128,128,128)
@@ -293,33 +298,34 @@
         xmax = particles_right_edge[0]
         ymax = particles_right_edge[1]
         zmax = particles_right_edge[2]
-        DLE = ds.domain_left_edge.in_units("code_length").ndarray_view()
-        DRE = ds.domain_right_edge.in_units("code_length").ndarray_view()
+        DLE = ds.domain_left_edge.in_units("code_length").d
+        DRE = ds.domain_right_edge.in_units("code_length").d
 
         xcond = (xmin < DLE[0]) or (xmax >= DRE[0])
         ycond = (ymin < DLE[1]) or (ymax >= DRE[1])
         zcond = (zmin < DLE[2]) or (zmax >= DRE[2])
         cond = xcond or ycond or zcond
 
-        if cond :
+        if cond:
             raise ValueError("Proposed bounds for particles are outside domain!!!")
 
-        ParticleGenerator.__init__(self, ds, num_x*num_y*num_z, field_list)
+        super(LatticeParticleGenerator, self).__init__(ds, num_x*num_y*num_z, 
+                                                       field_list, ptype=ptype)
 
         dx = (xmax-xmin)/(num_x-1)
         dy = (ymax-ymin)/(num_y-1)
         dz = (zmax-zmin)/(num_z-1)
-        inds = np.indices((num_x,num_y,num_z))
+        inds = np.indices((num_x, num_y, num_z))
         xpos = inds[0]*dx + xmin
         ypos = inds[1]*dy + ymin
         zpos = inds[2]*dz + zmin
-        
+
         self._setup_particles(xpos.flat[:], ypos.flat[:], zpos.flat[:])
-        
+
 class WithDensityParticleGenerator(ParticleGenerator):
 
     def __init__(self, ds, data_source, num_particles, field_list,
-                 density_field="density"):
+                 density_field="density", ptype="io"):
         r"""
         Generate particles based on a density field.
 
@@ -336,7 +342,7 @@
         density_field : string, optional
             A density field which will serve as the distribution function for the
             particle positions. Theoretically, this could be any 'per-volume' field. 
-            
+
         Examples
         --------
         >>> sphere = ds.sphere(ds.domain_center, 0.5)
@@ -348,7 +354,8 @@
         >>>                                          fields, density_field='Dark_Matter_Density')
         """
 
-        ParticleGenerator.__init__(self, ds, num_particles, field_list)
+        super(WithDensityParticleGenerator, self).__init__(ds, num_particles,
+                                                           field_list, ptype=ptype)
 
         num_cells = len(data_source["x"].flat)
         max_mass = (data_source[density_field]*
@@ -357,22 +364,22 @@
         all_x = []
         all_y = []
         all_z = []
-        
+
         pbar = get_pbar("Generating Particles", num_particles)
         tot_num_accepted = int(0)
-        
+
         while num_particles_left > 0:
 
             m = np.random.uniform(high=1.01*max_mass,
                                   size=num_particles_left)
             idxs = np.random.random_integers(low=0, high=num_cells-1,
                                              size=num_particles_left)
-            m_true = (data_source[density_field]*
+            m_true = (data_source[density_field] *
                       data_source["cell_volume"]).flat[idxs]
             accept = m <= m_true
             num_accepted = accept.sum()
             accepted_idxs = idxs[accept]
-            
+
             xpos = data_source["x"].flat[accepted_idxs] + \
                    np.random.uniform(low=-0.5, high=0.5, size=num_accepted) * \
                    data_source["dx"].flat[accepted_idxs]
@@ -397,5 +404,5 @@
         y = uconcatenate(all_y)
         z = uconcatenate(all_z)
 
-        self._setup_particles(x,y,z)
-        
+        self._setup_particles(x, y, z)
+

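To make the new keyword concrete, here is a minimal sketch of driving one of
these generators with a non-default particle type. The dataset setup
(load_uniform_grid on a random density array) is an illustrative assumption,
not part of the changeset:

    import numpy as np
    import yt
    from yt.utilities.particle_generator import FromListParticleGenerator

    # Hypothetical grid-based stream dataset to attach particles to
    arr = np.random.random(size=(32, 32, 32))
    ds = yt.load_uniform_grid({"density": (arr, "g/cm**3")}, arr.shape)

    # Position fields keyed by ("dm", ...) tuples; ptype="dm" makes the
    # generator build its internal field list with the same type
    num_dm = 4000
    pdata = {("dm", "particle_position_x"): np.random.uniform(size=num_dm),
             ("dm", "particle_position_y"): np.random.uniform(size=num_dm),
             ("dm", "particle_position_z"): np.random.uniform(size=num_dm)}
    dm_gen = FromListParticleGenerator(ds, num_dm, pdata, ptype="dm")
    dm_gen.apply_to_stream()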

https://bitbucket.org/yt_analysis/yt/commits/ac14439f6180/
Changeset:   ac14439f6180
User:        jzuhone
Date:        2017-05-22 21:46:29+00:00
Summary:     Remove particle union fields from the field list before we remake them
Affected #:  1 file

diff -r 21c93505a53d510183177f10ed781cb16971fcc8 -r ac14439f6180ee3ba62663841d12a5e42b2b5db8 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -284,6 +284,9 @@
 
         self._reset_particle_count()
         # We only want to create a superset of fields here.
+        for field in self.ds.field_list:
+            if field[0] == "all":
+                self.ds.field_list.remove(field)
         self._detect_output_fields()
         self.ds.create_field_info()
         mylog.debug("Creating Particle Union 'all'")

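Per the summary, any stale ("all", ...) union entries are dropped so that the
union can be rebuilt from the current particle types. In isolation the
filtering amounts to the following (the field_list shown is hypothetical, and
a list comprehension is used here rather than in-place removal):

    field_list = [("gas", "density"),
                  ("io", "particle_mass"),
                  ("all", "particle_mass")]
    field_list = [f for f in field_list if f[0] != "all"]
    # -> [('gas', 'density'), ('io', 'particle_mass')]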

https://bitbucket.org/yt_analysis/yt/commits/961ca0c1814f/
Changeset:   961ca0c1814f
User:        jzuhone
Date:        2017-05-22 21:48:41+00:00
Summary:     Making particle generators work with multiple field types. This changes existing behavior: we no longer fill in zero-valued particle fields for pre-existing particles
Affected #:  2 files

diff -r ac14439f6180ee3ba62663841d12a5e42b2b5db8 -r 961ca0c1814fa1eda33daee87086a17fb1ae60d4 yt/utilities/particle_generator.py
--- a/yt/utilities/particle_generator.py
+++ b/yt/utilities/particle_generator.py
@@ -17,7 +17,7 @@
         """
         self.ds = ds
         self.num_particles = num_particles
-        self.field_list = [(ptype, fd) if isinstance(fd,string_types) else fd
+        self.field_list = [(ptype, fd) if isinstance(fd, string_types) else fd
                            for fd in field_list]
         self.field_list.append((ptype, "particle_index"))
         self.field_units = dict(
@@ -168,11 +168,12 @@
 
     def apply_to_stream(self, clobber=False):
         """
-        Apply the particles to a stream dataset. If particles already exist,
-        and clobber=False, do not overwrite them, but add the new ones to them. 
+        Apply the particles to a grid-based stream dataset. If particles 
+        already exist, and clobber=False, do not overwrite them, but add 
+        the new ones to them.
         """
         grid_data = []
-        for i,g in enumerate(self.ds.index.grids):
+        for i, g in enumerate(self.ds.index.grids):
             data = {}
             if clobber:
                 data["number_of_particles"] = self.NumberOfParticles[i]
@@ -182,22 +183,15 @@
             grid_particles = self.get_for_grid(g)
             for field in self.field_list:
                 if data["number_of_particles"] > 0:
-                    # We have particles in this grid
-                    if g.NumberOfParticles > 0 and not clobber:
-                        # Particles already exist
-                        if field in self.ds.field_list:
-                            # This field already exists
-                            prev_particles = g[field]
-                        else:
-                            # This one doesn't, set the previous particles' field
-                            # values to zero
-                            prev_particles = np.zeros(g.NumberOfParticles)
-                            prev_particles = self.ds.arr(prev_particles,
-                                                         self.field_units[field])
-                        data[field] = uconcatenate((prev_particles,
-                                                    grid_particles[field]))
+                    if g.NumberOfParticles > 0 and not clobber and \
+                        field in self.ds.field_list:
+                        # We have particles in this grid, we're not
+                        # overwriting them, and the field is in the field
+                        # list already
+                        data[field] = uconcatenate([g[field],
+                                                    grid_particles[field]])
                     else:
-                        # Particles do not already exist or we're clobbering
+                        # Otherwise, simply add the field in
                         data[field] = grid_particles[field]
                 else:
                     # We don't have particles in this grid

diff -r ac14439f6180ee3ba62663841d12a5e42b2b5db8 -r 961ca0c1814fa1eda33daee87086a17fb1ae60d4 yt/utilities/tests/test_particle_generator.py
--- a/yt/utilities/tests/test_particle_generator.py
+++ b/yt/utilities/tests/test_particle_generator.py
@@ -37,7 +37,7 @@
     particles1 = WithDensityParticleGenerator(ds, sphere, num_particles, field_list)
     particles1.assign_indices()
     particles1.map_grid_fields_to_particles(field_dict)
-    
+
     # Test to make sure we ended up with the right number of particles per grid
     particles1.apply_to_stream()
     particles_per_grid1 = [grid.NumberOfParticles for grid in ds.index.grids]
@@ -49,23 +49,20 @@
     assert(np.unique(tags).size == num_particles)
 
     del tags
-    
+
     # Set up a lattice of particles
     pdims = np.array([32,32,32])
-    def new_indices() :
+    def new_indices():
         # We just add new indices onto the existing ones
         return np.arange((np.product(pdims)))+num_particles
     le = np.array([0.25,0.25,0.25])
     re = np.array([0.75,0.75,0.75])
-    new_field_list = field_list + [("io", "particle_gas_temperature")]
-    new_field_dict = {("gas", "density"): ("io", "particle_gas_density"),
-                      ("gas", "temperature"): ("io", "particle_gas_temperature")}
 
-    particles2 = LatticeParticleGenerator(ds, pdims, le, re, new_field_list)
+    particles2 = LatticeParticleGenerator(ds, pdims, le, re, field_list)
     particles2.assign_indices(function=new_indices)
-    particles2.map_grid_fields_to_particles(new_field_dict)
+    particles2.map_grid_fields_to_particles(field_dict)
 
-    #Test lattice positions
+    # Test lattice positions
     xpos = np.unique(particles2["io", "particle_position_x"])
     ypos = np.unique(particles2["io", "particle_position_y"])
     zpos = np.unique(particles2["io", "particle_position_z"])
@@ -74,14 +71,14 @@
     ypred = np.linspace(le[1],re[1],num=pdims[1],endpoint=True)
     zpred = np.linspace(le[2],re[2],num=pdims[2],endpoint=True)
 
-    assert_almost_equal( xpos, xpred)
-    assert_almost_equal( ypos, ypred)
-    assert_almost_equal( zpos, zpred)
+    assert_almost_equal(xpos, xpred)
+    assert_almost_equal(ypos, ypred)
+    assert_almost_equal(zpos, zpred)
 
     del xpos, ypos, zpos
     del xpred, ypred, zpred
 
-    #Test the number of particles again
+    # Test the number of particles again
     particles2.apply_to_stream()
     particles_per_grid2 = [grid.NumberOfParticles for grid in ds.index.grids]
     assert_equal(particles_per_grid2, particles1.NumberOfParticles+particles2.NumberOfParticles)
@@ -90,43 +87,54 @@
     particles_per_grid2 = [len(grid["particle_position_x"]) for grid in ds.index.grids]
     assert_equal(particles_per_grid2, particles1.NumberOfParticles+particles2.NumberOfParticles)
 
-    #Test the uniqueness of tags
+    # Test the uniqueness of tags
     tags = np.concatenate([grid["particle_index"] for grid in ds.index.grids])
     tags.sort()
     assert_equal(tags, np.arange((np.product(pdims)+num_particles)))
 
     del tags
 
-    # Test that the old particles have zero for the new field
-    old_particle_temps = [grid["particle_gas_temperature"][:particles_per_grid1[i]]
-                          for i, grid in enumerate(ds.index.grids)]
-    test_zeros = [np.zeros((particles_per_grid1[i])) 
-                  for i, grid in enumerate(ds.index.grids)]
-    assert_equal(old_particle_temps, test_zeros)
-
-    del old_particle_temps
-    del test_zeros
-
-    #Now dump all of these particle fields out into a dict
+    # Now dump all of these particle fields out into a dict
     pdata = {}
     dd = ds.all_data()
-    for field in new_field_list :
+    for field in field_list:
         pdata[field] = dd[field]
 
-    #Test the "from-list" generator and particle field clobber
-    particles3 = FromListParticleGenerator(ds, num_particles+np.product(pdims), pdata)
+    # Test the "from-list" generator and particle field clobber
+    num_particles3 = num_particles+np.product(pdims)
+    particles3 = FromListParticleGenerator(ds, num_particles3, pdata)
     particles3.apply_to_stream(clobber=True)
-    
-    #Test the number of particles again
+
+    # Test the number of particles again
     particles_per_grid3 = [grid.NumberOfParticles for grid in ds.index.grids]
     assert_equal(particles_per_grid3, particles1.NumberOfParticles+particles2.NumberOfParticles)
     particles_per_grid2 = [len(grid["particle_position_z"]) for grid in ds.index.grids]
     assert_equal(particles_per_grid3, particles1.NumberOfParticles+particles2.NumberOfParticles)
+    assert_equal(particles_per_grid2, particles_per_grid3)
+
+    # Test adding in particles with a different particle type
+
+    num_star_particles = 100000
+    pdata2 = {("star", "particle_position_x"): np.random.uniform(size=num_star_particles),
+              ("star", "particle_position_y"): np.random.uniform(size=num_star_particles),
+              ("star", "particle_position_z"): np.random.uniform(size=num_star_particles)}
+
+    particles4 = FromListParticleGenerator(ds, num_star_particles, pdata2, ptype="star")
+    particles4.apply_to_stream()
+
+    dd = ds.all_data()
+    assert dd["star", "particle_position_x"].size == num_star_particles
+    assert dd["io", "particle_position_x"].size == num_particles3
+    assert dd["all", "particle_position_x"].size == num_star_particles+num_particles3
+
 
     del pdata
+    del pdata2
     del ds
     del particles1
     del particles2
+    del particles4
     del fields
     del dens
     del temp
+

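The tail of the updated test doubles as a usage pattern for mixing particle
types; condensed here, assuming a grid-based stream dataset ds that already
carries "io" particles:

    import numpy as np
    from yt.utilities.particle_generator import FromListParticleGenerator

    num_star = 20000
    pdata = {("star", "particle_position_x"): np.random.uniform(size=num_star),
             ("star", "particle_position_y"): np.random.uniform(size=num_star),
             ("star", "particle_position_z"): np.random.uniform(size=num_star)}

    stars = FromListParticleGenerator(ds, num_star, pdata, ptype="star")
    stars.apply_to_stream()  # clobber=False, so the "io" particles are kept

    dd = ds.all_data()
    # "star" and "io" coexist; "all" is the union of both types
    assert dd["star", "particle_position_x"].size == num_star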

https://bitbucket.org/yt_analysis/yt/commits/f6b53b637740/
Changeset:   f6b53b637740
User:        jzuhone
Date:        2017-05-22 22:14:21+00:00
Summary:     Update generic particle docs
Affected #:  1 file

diff -r 961ca0c1814fa1eda33daee87086a17fb1ae60d4 -r f6b53b637740cc9497cdbf4bfc1b987d57987c6a doc/source/examining/Loading_Generic_Particle_Data.ipynb
--- a/doc/source/examining/Loading_Generic_Particle_Data.ipynb
+++ b/doc/source/examining/Loading_Generic_Particle_Data.ipynb
@@ -19,7 +19,7 @@
    "source": [
     "import numpy as np\n",
     "\n",
-    "n_particles = 5e6\n",
+    "n_particles = 5000000\n",
     "\n",
     "ppx, ppy, ppz = 1e6*np.random.normal(size=[3, n_particles])\n",
     "\n",
@@ -139,12 +139,63 @@
   {
    "cell_type": "code",
    "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "slc = yt.SlicePlot(ds, 2, ('deposit', 'all_cic'))\n",
+    "slc.set_width((8, 'Mpc'))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Finally, one can specify multiple particle types in the `data` dictionary by setting the field names to be field tuples (the default field type for particles, if one is not specified, is `\"io\"`):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "n_star_particles = 1000000\n",
+    "n_dm_particles = 2000000\n",
+    "\n",
+    "ppxd, ppyd, ppzd = 1e6*np.random.normal(size=[3, n_dm_particles])\n",
+    "ppmd = np.ones(n_dm_particles)\n",
+    "\n",
+    "ppxs, ppys, ppzs = 5e5*np.random.normal(size=[3, n_star_particles])\n",
+    "ppms = 0.1*np.ones(n_star_particles)\n",
+    "\n",
+    "data2 = {('dm', 'particle_position_x'): ppxd,\n",
+    "         ('dm', 'particle_position_y'): ppyd,\n",
+    "         ('dm', 'particle_position_z'): ppzd,\n",
+    "         ('dm', 'particle_mass'): ppmd,\n",
+    "         ('star', 'particle_position_x'): ppxs,\n",
+    "         ('star', 'particle_position_y'): ppys,\n",
+    "         ('star', 'particle_position_z'): ppzs,\n",
+    "         ('star', 'particle_mass'): ppms}\n",
+    "\n",
+    "ds2 = yt.load_particles(data2, length_unit=parsec, mass_unit=1e8*Msun, n_ref=256, bbox=bbox)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We now have separate `\"dm\"` and `\"star\"` particles, as well as their deposited fields:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
    "metadata": {
     "collapsed": false
    },
    "outputs": [],
    "source": [
-    "slc = yt.SlicePlot(ds, 2, ('deposit', 'all_cic'))\n",
+    "slc = yt.SlicePlot(ds2, 2, [('deposit', 'dm_cic'), ('deposit', 'star_cic')])\n",
     "slc.set_width((8, 'Mpc'))"
    ]
   }

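A natural follow-up to the cells above, using the ds2 defined there, is to
confirm the two types and their union; sketched here rather than added to the
notebook:

    dd = ds2.all_data()
    print(ds2.particle_types)                # includes 'dm' and 'star'
    print(dd["dm", "particle_mass"].size)    # 2000000
    print(dd["star", "particle_mass"].size)  # 1000000
    print(dd["all", "particle_mass"].size)   # 3000000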

https://bitbucket.org/yt_analysis/yt/commits/84463c9556c4/
Changeset:   84463c9556c4
User:        jzuhone
Date:        2017-05-22 22:25:45+00:00
Summary:     Update docs for generic array data
Affected #:  1 file

diff -r f6b53b637740cc9497cdbf4bfc1b987d57987c6a -r 84463c9556c40e8fb6fe66460ef42c40846fec6b doc/source/examining/Loading_Generic_Array_Data.ipynb
--- a/doc/source/examining/Loading_Generic_Array_Data.ipynb
+++ b/doc/source/examining/Loading_Generic_Array_Data.ipynb
@@ -163,7 +163,7 @@
     "            particle_position_z = (posz_arr, 'code_length'))\n",
     "bbox = np.array([[-1.5, 1.5], [-1.5, 1.5], [-1.5, 1.5]])\n",
     "ds = yt.load_uniform_grid(data, data[\"density\"][0].shape, length_unit=(1.0, \"Mpc\"), mass_unit=(1.0,\"Msun\"), \n",
-    "                       bbox=bbox, nprocs=4)"
+    "                          bbox=bbox, nprocs=4)"
    ]
   },
   {
@@ -638,6 +638,65 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
+    "## Multiple Particle Types"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "For both uniform grid data and AMR data, one can specify particle fields with multiple types if the particle field names are given as field tuples instead of strings (the default particle type is `\"io\"`). Make sure that the total number of particles given in the `number_of_particles` key is set to the sum of the number of particles for all types:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "posxr_arr = np.random.uniform(low=-1.5, high=1.5, size=10000)\n",
+    "posyr_arr = np.random.uniform(low=-1.5, high=1.5, size=10000)\n",
+    "poszr_arr = np.random.uniform(low=-1.5, high=1.5, size=10000)\n",
+    "posxb_arr = np.random.uniform(low=-1.5, high=1.5, size=20000)\n",
+    "posyb_arr = np.random.uniform(low=-1.5, high=1.5, size=20000)\n",
+    "poszb_arr = np.random.uniform(low=-1.5, high=1.5, size=20000)\n",
+    "data = {\"density\": (np.random.random(size=(64,64,64)), \"Msun/kpc**3\"), \n",
+    "        \"number_of_particles\": 30000,\n",
+    "        (\"red\", \"particle_position_x\"): (posxr_arr, 'code_length'), \n",
+    "        (\"red\", \"particle_position_y\"): (posyr_arr, 'code_length'),\n",
+    "        (\"red\", \"particle_position_z\"): (poszr_arr, 'code_length'),\n",
+    "        (\"blue\", \"particle_position_x\"): (posxb_arr, 'code_length'), \n",
+    "        (\"blue\", \"particle_position_y\"): (posyb_arr, 'code_length'),\n",
+    "        (\"blue\", \"particle_position_z\"): (poszb_arr, 'code_length')}\n",
+    "bbox = np.array([[-1.5, 1.5], [-1.5, 1.5], [-1.5, 1.5]])\n",
+    "ds = yt.load_uniform_grid(data, data[\"density\"][0].shape, length_unit=(1.0, \"Mpc\"), mass_unit=(1.0,\"Msun\"), \n",
+    "                          bbox=bbox, nprocs=4)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We can now see we have multiple particle types:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "dd = ds.all_data()\n",
+    "print (ds.particle_types)\n",
+    "print (dd[\"red\", \"particle_position_x\"].size)\n",
+    "print (dd[\"blue\", \"particle_position_x\"].size)\n",
+    "print (dd[\"all\", \"particle_position_x\"].size)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
     "## Caveats for Loading Generic Array Data"
    ]
   },


https://bitbucket.org/yt_analysis/yt/commits/5d5d95de680d/
Changeset:   5d5d95de680d
User:        jzuhone
Date:        2017-05-22 22:27:51+00:00
Summary:     Add ptype to docstrings
Affected #:  1 file

diff -r 84463c9556c40e8fb6fe66460ef42c40846fec6b -r 5d5d95de680dba4d203a3d1f2870483758f30169 yt/utilities/particle_generator.py
--- a/yt/utilities/particle_generator.py
+++ b/yt/utilities/particle_generator.py
@@ -13,7 +13,8 @@
         streams. Normally this would not be called directly, since it doesn't
         really do anything except allocate memory. Takes a *ds* to serve as the
         basis for determining grids, the number of particles *num_particles*,
-        and a list of fields, *field_list*.
+        a list of fields, *field_list*, and the particle type *ptype*, which
+        has a default value of "io". 
         """
         self.ds = ds
         self.num_particles = num_particles
@@ -213,6 +214,8 @@
             The number of particles in the dict.
         data : dict of NumPy arrays
             The particle fields themselves.
+        ptype : string, optional
+            The particle type for these particle fields. Default: "io"
 
         Examples
         --------
@@ -271,6 +274,8 @@
              The 'right-most' ending positions of the lattice.
         field_list : list of strings
              A list of particle fields
+        ptype : string, optional
+            The particle type for these particle fields. Default: "io"
 
         Examples
         --------
@@ -336,6 +341,8 @@
         density_field : string, optional
             A density field which will serve as the distribution function for the
             particle positions. Theoretically, this could be any 'per-volume' field. 
+        ptype : string, optional
+            The particle type for these particle fields. Default: "io"
 
         Examples
         --------

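Taken together, the new ptype parameters support patterns like the following
sketch, which assumes a stream dataset ds with a "density" field; plain-string
field names are promoted to (ptype, name) tuples internally:

    from yt.utilities.particle_generator import WithDensityParticleGenerator

    sphere = ds.sphere("c", (0.25, "code_length"))
    fields = ["particle_position_x", "particle_position_y",
              "particle_position_z", "particle_gas_density"]
    dm_gen = WithDensityParticleGenerator(ds, sphere, 10000, fields,
                                          density_field="density", ptype="dm")
    dm_gen.assign_indices()
    dm_gen.map_grid_fields_to_particles(
        {("gas", "density"): ("dm", "particle_gas_density")})
    dm_gen.apply_to_stream()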

https://bitbucket.org/yt_analysis/yt/commits/f44f1ea7df23/
Changeset:   f44f1ea7df23
User:        jzuhone
Date:        2017-05-24 12:31:19+00:00
Summary:     Incorporating Matt’s suggestions
Affected #:  1 file

diff -r 5d5d95de680dba4d203a3d1f2870483758f30169 -r f44f1ea7df23be4b07c0139f1406bd084b4f89a9 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -267,7 +267,7 @@
         particle_types = set_particle_types(data[0])
 
         for key in data[0].keys():
-            if key is "number_of_particles":
+            if key == "number_of_particles":
                 continue
             self.stream_handler.particle_types[key] = particle_types[key]
         self.ds._find_particle_types()
@@ -367,11 +367,11 @@
         return True
 
     def _find_particle_types(self):
-        particle_types = []
+        particle_types = set([])
         for k, v in self.stream_handler.particle_types.items():
             if v:
-                particle_types.append(k[0])
-        self.particle_types = tuple(set(particle_types))
+                particle_types.add(k[0])
+        self.particle_types = tuple(particle_types)
         self.particle_types_raw = self.particle_types
 
 class StreamDictFieldHandler(dict):

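In isolation, the set-based _find_particle_types boils down to collecting the
unique leading element of every field tuple flagged as a particle field; the
mapping below is hypothetical:

    particle_types_map = {("dm", "particle_position_x"): True,
                          ("star", "particle_position_x"): True,
                          ("gas", "density"): False}
    ptypes = set()
    for field, is_particle in particle_types_map.items():
        if is_particle:
            ptypes.add(field[0])
    print(tuple(ptypes))  # ('dm', 'star'), in arbitrary order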

https://bitbucket.org/yt_analysis/yt/commits/423b987dbef4/
Changeset:   423b987dbef4
User:        jzuhone
Date:        2017-05-24 13:57:00+00:00
Summary:     Decreasing number of test particles to use less memory
Affected #:  2 files

diff -r f44f1ea7df23be4b07c0139f1406bd084b4f89a9 -r 423b987dbef47f749c6c0e66fc7827950b411efe yt/frontends/stream/tests/test_stream_particles.py
--- a/yt/frontends/stream/tests/test_stream_particles.py
+++ b/yt/frontends/stream/tests/test_stream_particles.py
@@ -263,7 +263,7 @@
 
 def test_load_particles_types():
 
-    num_particles = 100000
+    num_particles = 10000
 
     data1 = {"particle_position_x": np.random.random(size=num_particles),
              "particle_position_y": np.random.random(size=num_particles),

diff -r f44f1ea7df23be4b07c0139f1406bd084b4f89a9 -r 423b987dbef47f749c6c0e66fc7827950b411efe yt/utilities/tests/test_particle_generator.py
--- a/yt/utilities/tests/test_particle_generator.py
+++ b/yt/utilities/tests/test_particle_generator.py
@@ -30,7 +30,7 @@
                   ("io", "particle_position_z"),
                   ("io", "particle_index"),
                   ("io", "particle_gas_density")]
-    num_particles = 1000000
+    num_particles = 10000
     field_dict = {("gas", "density"): ("io", "particle_gas_density")}
     sphere = ds.sphere(ds.domain_center, 0.45)
 
@@ -114,7 +114,7 @@
 
     # Test adding in particles with a different particle type
 
-    num_star_particles = 100000
+    num_star_particles = 20000
     pdata2 = {("star", "particle_position_x"): np.random.uniform(size=num_star_particles),
               ("star", "particle_position_y"): np.random.uniform(size=num_star_particles),
               ("star", "particle_position_z"): np.random.uniform(size=num_star_particles)}


https://bitbucket.org/yt_analysis/yt/commits/c9a09a20ccc4/
Changeset:   c9a09a20ccc4
User:        MatthewTurk
Date:        2017-05-25 21:18:07+00:00
Summary:     Merge pull request #1382 from jzuhone/particle_types

Multiple particle types for stream datasets
Affected #:  9 files

diff -r ad734750c43236b14d3a19f4eff5389a63eb4524 -r c9a09a20ccc4204fb9f8fed008a38d45c86b3300 doc/source/examining/Loading_Generic_Array_Data.ipynb
--- a/doc/source/examining/Loading_Generic_Array_Data.ipynb
+++ b/doc/source/examining/Loading_Generic_Array_Data.ipynb
@@ -163,7 +163,7 @@
     "            particle_position_z = (posz_arr, 'code_length'))\n",
     "bbox = np.array([[-1.5, 1.5], [-1.5, 1.5], [-1.5, 1.5]])\n",
     "ds = yt.load_uniform_grid(data, data[\"density\"][0].shape, length_unit=(1.0, \"Mpc\"), mass_unit=(1.0,\"Msun\"), \n",
-    "                       bbox=bbox, nprocs=4)"
+    "                          bbox=bbox, nprocs=4)"
    ]
   },
   {
@@ -638,6 +638,65 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
+    "## Multiple Particle Types"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "For both uniform grid data and AMR data, one can specify particle fields with multiple types if the particle field names are given as field tuples instead of strings (the default particle type is `\"io\"`). Make sure that the total number of particles given in the `number_of_particles` key is set to the sum of the number of particles for all types:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "posxr_arr = np.random.uniform(low=-1.5, high=1.5, size=10000)\n",
+    "posyr_arr = np.random.uniform(low=-1.5, high=1.5, size=10000)\n",
+    "poszr_arr = np.random.uniform(low=-1.5, high=1.5, size=10000)\n",
+    "posxb_arr = np.random.uniform(low=-1.5, high=1.5, size=20000)\n",
+    "posyb_arr = np.random.uniform(low=-1.5, high=1.5, size=20000)\n",
+    "poszb_arr = np.random.uniform(low=-1.5, high=1.5, size=20000)\n",
+    "data = {\"density\": (np.random.random(size=(64,64,64)), \"Msun/kpc**3\"), \n",
+    "        \"number_of_particles\": 30000,\n",
+    "        (\"red\", \"particle_position_x\"): (posxr_arr, 'code_length'), \n",
+    "        (\"red\", \"particle_position_y\"): (posyr_arr, 'code_length'),\n",
+    "        (\"red\", \"particle_position_z\"): (poszr_arr, 'code_length'),\n",
+    "        (\"blue\", \"particle_position_x\"): (posxb_arr, 'code_length'), \n",
+    "        (\"blue\", \"particle_position_y\"): (posyb_arr, 'code_length'),\n",
+    "        (\"blue\", \"particle_position_z\"): (poszb_arr, 'code_length')}\n",
+    "bbox = np.array([[-1.5, 1.5], [-1.5, 1.5], [-1.5, 1.5]])\n",
+    "ds = yt.load_uniform_grid(data, data[\"density\"][0].shape, length_unit=(1.0, \"Mpc\"), mass_unit=(1.0,\"Msun\"), \n",
+    "                          bbox=bbox, nprocs=4)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We can now see we have multiple particle types:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "dd = ds.all_data()\n",
+    "print (ds.particle_types)\n",
+    "print (dd[\"red\", \"particle_position_x\"].size)\n",
+    "print (dd[\"blue\", \"particle_position_x\"].size)\n",
+    "print (dd[\"all\", \"particle_position_x\"].size)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
     "## Caveats for Loading Generic Array Data"
    ]
   },

diff -r ad734750c43236b14d3a19f4eff5389a63eb4524 -r c9a09a20ccc4204fb9f8fed008a38d45c86b3300 doc/source/examining/Loading_Generic_Particle_Data.ipynb
--- a/doc/source/examining/Loading_Generic_Particle_Data.ipynb
+++ b/doc/source/examining/Loading_Generic_Particle_Data.ipynb
@@ -139,12 +139,63 @@
   {
    "cell_type": "code",
    "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "slc = yt.SlicePlot(ds, 2, ('deposit', 'all_cic'))\n",
+    "slc.set_width((8, 'Mpc'))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Finally, one can specify multiple particle types in the `data` dictionary by setting the field names to be field tuples (the default field type for particles, if one is not specified, is `\"io\"`):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "n_star_particles = 1000000\n",
+    "n_dm_particles = 2000000\n",
+    "\n",
+    "ppxd, ppyd, ppzd = 1e6*np.random.normal(size=[3, n_dm_particles])\n",
+    "ppmd = np.ones(n_dm_particles)\n",
+    "\n",
+    "ppxs, ppys, ppzs = 5e5*np.random.normal(size=[3, n_star_particles])\n",
+    "ppms = 0.1*np.ones(n_star_particles)\n",
+    "\n",
+    "data2 = {('dm', 'particle_position_x'): ppxd,\n",
+    "         ('dm', 'particle_position_y'): ppyd,\n",
+    "         ('dm', 'particle_position_z'): ppzd,\n",
+    "         ('dm', 'particle_mass'): ppmd,\n",
+    "         ('star', 'particle_position_x'): ppxs,\n",
+    "         ('star', 'particle_position_y'): ppys,\n",
+    "         ('star', 'particle_position_z'): ppzs,\n",
+    "         ('star', 'particle_mass'): ppms}\n",
+    "\n",
+    "ds2 = yt.load_particles(data2, length_unit=parsec, mass_unit=1e8*Msun, n_ref=256, bbox=bbox)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We now have separate `\"dm\"` and `\"star\"` particles, as well as their deposited fields:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
    "metadata": {
     "collapsed": false
    },
    "outputs": [],
    "source": [
-    "slc = yt.SlicePlot(ds, 2, ('deposit', 'all_cic'))\n",
+    "slc = yt.SlicePlot(ds2, 2, [('deposit', 'dm_cic'), ('deposit', 'star_cic')])\n",
     "slc.set_width((8, 'Mpc'))"
    ]
   }

diff -r ad734750c43236b14d3a19f4eff5389a63eb4524 -r c9a09a20ccc4204fb9f8fed008a38d45c86b3300 yt/fields/cosmology_fields.py
--- a/yt/fields/cosmology_fields.py
+++ b/yt/fields/cosmology_fields.py
@@ -107,7 +107,11 @@
     # r / r_vir
     def _virial_radius_fraction(field, data):
         virial_radius = data.get_field_parameter("virial_radius")
-        return data["radius"] / virial_radius
+        if virial_radius == 0.0:
+            ret = 0.0
+        else:
+            ret = data["radius"] / virial_radius
+        return ret
 
     registry.add_field(("index", "virial_radius_fraction"), sampling_type="cell", 
                        function=_virial_radius_fraction,

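The guard above matters when the "virial_radius" field parameter is left at
its zero default; typical use sets it explicitly on a data object before
querying r / r_vir. A sketch, assuming any dataset ds:

    sp = ds.sphere("c", (1.0, "Mpc"))
    sp.set_field_parameter("virial_radius", ds.quan(0.5, "Mpc"))
    frac = sp["index", "virial_radius_fraction"]  # r / r_vir per cell
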
diff -r ad734750c43236b14d3a19f4eff5389a63eb4524 -r c9a09a20ccc4204fb9f8fed008a38d45c86b3300 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -152,11 +152,11 @@
     def get_fields(self):
         return self.fields.all_fields
 
-    def get_particle_type(self, field) :
+    def get_particle_type(self, field):
 
-        if field in self.particle_types :
+        if field in self.particle_types:
             return self.particle_types[field]
-        else :
+        else:
             return False
 
 class StreamHierarchy(GridIndex):
@@ -254,35 +254,41 @@
         else:
             self.io = io_registry[self.dataset_type](self.ds)
 
-    def update_data(self, data, units = None):
+    def _reset_particle_count(self):
+        self.grid_particle_count[:] = self.stream_handler.particle_count
+        for i, grid in enumerate(self.grids):
+            grid.NumberOfParticles = self.grid_particle_count[i, 0]
 
+    def update_data(self, data):
         """
         Update the stream data with a new data dict. If fields already exist,
         they will be replaced, but if they do not, they will be added. Fields
         already in the stream but not part of the data dict will be left
-        alone. 
+        alone.
         """
-        [update_field_names(d) for d in data]
-        if units is not None:
-            self.stream_handler.field_units.update(units)
         particle_types = set_particle_types(data[0])
-        ftype = "io"
 
-        for key in data[0].keys() :
-            if key is "number_of_particles": continue
+        for key in data[0].keys():
+            if key == "number_of_particles":
+                continue
             self.stream_handler.particle_types[key] = particle_types[key]
+        self.ds._find_particle_types()
 
-        for i, grid in enumerate(self.grids) :
-            if "number_of_particles" in data[i] :
-                grid.NumberOfParticles = data[i].pop("number_of_particles")
-            for fname in data[i]:
-                if fname in grid.field_data:
-                    grid.field_data.pop(fname, None)
-                elif (ftype, fname) in grid.field_data:
-                    grid.field_data.pop( ("io", fname) )
-                self.stream_handler.fields[grid.id][fname] = data[i][fname]
-            
+        for i, grid in enumerate(self.grids):
+            if "number_of_particles" in data[i]:
+                self.stream_handler.particle_count[i] = data[i].pop("number_of_particles")
+            field_units, gdata = unitify_data(data[i])
+            self.stream_handler.field_units.update(field_units)
+            for field in gdata:
+                if field in grid.field_data:
+                    grid.field_data.pop(field, None)
+                self.stream_handler.fields[grid.id][field] = gdata[field]
+
+        self._reset_particle_count()
         # We only want to create a superset of fields here.
+        for field in self.ds.field_list:
+            if field[0] == "all":
+                self.ds.field_list.remove(field)
         self._detect_output_fields()
         self.ds.create_field_info()
         mylog.debug("Creating Particle Union 'all'")
@@ -298,14 +304,11 @@
 
     def __init__(self, stream_handler, storage_filename=None,
                  geometry="cartesian", unit_system="cgs"):
-        #if parameter_override is None: parameter_override = {}
-        #self._parameter_override = parameter_override
-        #if conversion_override is None: conversion_override = {}
-        #self._conversion_override = conversion_override
         self.fluid_types += ("stream",)
         self.geometry = geometry
         self.stream_handler = stream_handler
-        name = "InMemoryParameterFile_%s" % (uuid.uuid4().hex)
+        self._find_particle_types()
+        name = "InMemoryParameterFile_%s" % uuid.uuid4().hex
         from yt.data_objects.static_output import _cached_datasets
         _cached_datasets[name] = self
         Dataset.__init__(self, name, self._dataset_type,
@@ -365,6 +368,14 @@
     def _skip_cache(self):
         return True
 
+    def _find_particle_types(self):
+        particle_types = set([])
+        for k, v in self.stream_handler.particle_types.items():
+            if v:
+                particle_types.add(k[0])
+        self.particle_types = tuple(particle_types)
+        self.particle_types_raw = self.particle_types
+
 class StreamDictFieldHandler(dict):
     _additional_fields = ()
 
@@ -410,6 +421,21 @@
     will overwrite any existing particle data, so be careful!
     """
 
+    for ptype in ds.particle_types_raw:
+        check_fields = [(ptype, "particle_position_x"),
+                        (ptype, "particle_position")]
+        if all(f not in pdata for f in check_fields):
+            pdata_ftype = {}
+            for f in [k for k in sorted(pdata)]:
+                if not hasattr(pdata[f], "shape"):
+                    continue
+                if f == 'number_of_particles':
+                    continue
+                mylog.debug("Reassigning '%s' to ('%s','%s')", f, ptype, f)
+                pdata_ftype[ptype, f] = pdata.pop(f)
+            pdata_ftype.update(pdata)
+            pdata = pdata_ftype
+
     # Note: what we need to do here is a bit tricky.  Because occasionally this
     # gets called before we properly handle the field detection, we cannot use
     # any information about the index.  Fortunately for us, we can generate
@@ -417,14 +443,7 @@
     # stream handler.
 
     if len(ds.stream_handler.fields) > 1:
-
-        if ("io", "particle_position_x") in pdata:
-            x, y, z = (pdata["io", "particle_position_%s" % ax] for ax in 'xyz')
-        elif ("io", "particle_position") in pdata:
-            x, y, z = pdata["io", "particle_position"].T
-        else:
-            raise KeyError(
-                "Cannot decompose particle data without position fields!")
+        pdata.pop("number_of_particles", None)
         num_grids = len(ds.stream_handler.fields)
         parent_ids = ds.stream_handler.parent_ids
         num_children = np.zeros(num_grids, dtype='int64')
@@ -441,34 +460,44 @@
                              ds.stream_handler.parent_ids,
                              levels, num_children)
 
-        pts = MatchPointsToGrids(grid_tree, len(x), x, y, z)
-        particle_grid_inds = pts.find_points_in_tree()
-        idxs = np.argsort(particle_grid_inds)
-        particle_grid_count = np.bincount(particle_grid_inds.astype("intp"),
-                                          minlength=num_grids)
-        particle_indices = np.zeros(num_grids + 1, dtype='int64')
-        if num_grids > 1 :
-            np.add.accumulate(particle_grid_count.squeeze(),
-                              out=particle_indices[1:])
-        else :
-            particle_indices[1] = particle_grid_count.squeeze()
-
-        pdata.pop("number_of_particles", None)
         grid_pdata = []
-        for i, pcount in enumerate(particle_grid_count):
-            grid = {}
-            grid["number_of_particles"] = pcount
-            start = particle_indices[i]
-            end = particle_indices[i+1]
-            for key in pdata.keys() :
-                grid[key] = pdata[key][idxs][start:end]
+        for i in range(num_grids):
+            grid = {"number_of_particles": 0}
             grid_pdata.append(grid)
 
-    else :
+        for ptype in ds.particle_types_raw:
+            if (ptype, "particle_position_x") in pdata:
+                x, y, z = (pdata[ptype, "particle_position_%s" % ax] for ax in 'xyz')
+            elif (ptype, "particle_position") in pdata:
+                x, y, z = pdata[ptype, "particle_position"].T
+            else:
+                raise KeyError(
+                    "Cannot decompose particle data without position fields!")
+            pts = MatchPointsToGrids(grid_tree, len(x), x, y, z)
+            particle_grid_inds = pts.find_points_in_tree()
+            idxs = np.argsort(particle_grid_inds)
+            particle_grid_count = np.bincount(particle_grid_inds.astype("intp"),
+                                              minlength=num_grids)
+            particle_indices = np.zeros(num_grids + 1, dtype='int64')
+            if num_grids > 1:
+                np.add.accumulate(particle_grid_count.squeeze(),
+                                  out=particle_indices[1:])
+            else:
+                particle_indices[1] = particle_grid_count.squeeze()
+            for i, pcount in enumerate(particle_grid_count):
+                grid_pdata[i]["number_of_particles"] += pcount
+                start = particle_indices[i]
+                end = particle_indices[i+1]
+                for key in pdata.keys():
+                    if key[0] == ptype:
+                        grid_pdata[i][key] = pdata[key][idxs][start:end]
+
+    else:
         grid_pdata = [pdata]
 
     for pd, gi in zip(grid_pdata, sorted(ds.stream_handler.fields)):
         ds.stream_handler.fields[gi].update(pd)
+        ds.stream_handler.particle_types.update(set_particle_types(pd))
         npart = ds.stream_handler.fields[gi].pop("number_of_particles", 0)
         ds.stream_handler.particle_count[gi] = npart
 
@@ -532,7 +561,7 @@
         # overridden here.
         if any(f[0] == new_field[1] for f in known_fields) and \
            field_units[new_field] == "":
-            field_units.pop(new_field)
+               field_units.pop(new_field)
     data = new_data
     return field_units, data
 
@@ -628,22 +657,21 @@
     # First we fix our field names
     field_units, data = unitify_data(data)
 
+    dshape = tuple(domain_dimensions)
     for field_name in data:
         fshape = data[field_name].shape
-        dshape = tuple(domain_dimensions)
-        pshape = (number_of_particles, )
-        if fshape != dshape and fshape != pshape:
+        if len(fshape) == 3 and fshape != dshape:
             msg = ("Input data shape %s for field %s does not match provided "
-                   "domain_dimensions %s or number of particles %s")
-            msg = msg % (fshape, field_name, dshape, pshape)
+                   "domain_dimensions %s!")
+            msg = msg % (fshape, field_name, dshape)
             raise RuntimeError(msg)
 
     sfh = StreamDictFieldHandler()
 
     if number_of_particles > 0:
         particle_types = set_particle_types(data)
-        pdata = {} # Used much further below.
-        pdata["number_of_particles"] = number_of_particles
+        # Used much further below.
+        pdata = {"number_of_particles": number_of_particles}
         for key in list(data.keys()):
             if len(data[key].shape) == 1 or key[0] == 'io':
                 if not isinstance(key, tuple):
@@ -716,21 +744,8 @@
 
     sds = StreamDataset(handler, geometry=geometry, unit_system=unit_system)
 
-    check_fields = [("io", "particle_position_x"), ("io", "particle_position")]
-
     # Now figure out where the particles go
     if number_of_particles > 0:
-        if all(f not in pdata for f in check_fields):
-            pdata_ftype = {}
-            for f in [k for k in sorted(pdata)]:
-                if not hasattr(pdata[f], "shape"):
-                    continue
-                if f == 'number_of_particles':
-                    continue
-                mylog.debug("Reassigning '%s' to ('io','%s')", f, f)
-                pdata_ftype["io",f] = pdata.pop(f)
-            pdata_ftype.update(pdata)
-            pdata = pdata_ftype
         # This will update the stream handler too
         assign_particle_data(sds, pdata)
 
@@ -957,9 +972,9 @@
             if not isinstance(field, tuple):
                 field = ("unknown", field)
             fi = base_ds._get_field_info(*field)
-            if fi.particle_type :
+            if fi.particle_type and field[0] in base_ds.particle_types_raw:
                 pdata[field] = uconcatenate([grid[field]
-                                               for grid in base_ds.index.grids])
+                                             for grid in base_ds.index.grids])
         pdata["number_of_particles"] = number_of_particles
 
     last_gc = base_ds.index.num_grids
@@ -975,15 +990,15 @@
         if callback is not None: callback(ds)
         grid_data = []
         for g in ds.index.grids:
-            gd = dict( left_edge = g.LeftEdge,
-                       right_edge = g.RightEdge,
-                       level = g.Level,
-                       dimensions = g.ActiveDimensions )
+            gd = dict(left_edge=g.LeftEdge,
+                       right_edge=g.RightEdge,
+                       level=g.Level,
+                       dimensions=g.ActiveDimensions)
             for field in ds.field_list:
                 if not isinstance(field, tuple):
                     field = ("unknown", field)
                 fi = ds._get_field_info(*field)
-                if not fi.particle_type :
+                if not fi.particle_type:
                     gd[field] = g[field]
             grid_data.append(gd)
             if g.Level < ds.index.max_level: continue
@@ -993,36 +1008,38 @@
                 LE = sg.left_index * g.dds + ds.domain_left_edge
                 dims = sg.dimensions * ds.refine_by
                 grid = ds.smoothed_covering_grid(g.Level + 1, LE, dims)
-                gd = dict(left_edge = LE, right_edge = grid.right_edge,
-                          level = g.Level + 1, dimensions = dims)
+                gd = dict(left_edge=LE, right_edge=grid.right_edge,
+                          level=g.Level + 1, dimensions=dims)
                 for field in ds.field_list:
                     if not isinstance(field, tuple):
                         field = ("unknown", field)
                     fi = ds._get_field_info(*field)
-                    if not fi.particle_type :
+                    if not fi.particle_type:
                         gd[field] = grid[field]
                 grid_data.append(gd)
 
         ds = load_amr_grids(grid_data, ds.domain_dimensions, bbox=bbox)
 
-        if number_of_particles > 0:
-            if ("io", "particle_position_x") not in pdata:
-                pdata_ftype = {}
-                for f in [k for k in sorted(pdata)]:
-                    if not hasattr(pdata[f], "shape"): continue
-                    mylog.debug("Reassigning '%s' to ('io','%s')", f, f)
-                    pdata_ftype["io",f] = pdata.pop(f)
-                pdata_ftype.update(pdata)
-                pdata = pdata_ftype
-            assign_particle_data(ds, pdata)
-            # We need to reassign the field list here.
         cur_gc = ds.index.num_grids
 
+    ds.particle_types_raw = base_ds.particle_types_raw
+    ds.particle_types = ds.particle_types_raw
+
+    # Now figure out where the particles go
+    if number_of_particles > 0:
+        # This will update the stream handler too
+        assign_particle_data(ds, pdata)
+        # Because we've already used the index, we
+        # have to re-create the field info because
+        # we added particle data after the fact
+        ds.index._reset_particle_count()
+        ds.index._detect_output_fields()
+        ds.create_field_info()
+
     return ds
 
 class StreamParticleIndex(ParticleIndex):
 
-
     def __init__(self, ds, dataset_type = None):
         self.stream_handler = ds.stream_handler
         super(StreamParticleIndex, self).__init__(ds, dataset_type)
@@ -1129,7 +1146,6 @@
     data = pdata # Drop reference count
     update_field_names(data)
     particle_types = set_particle_types(data)
-
     sfh.update({'stream_file':data})
     grid_left_edges = domain_left_edge
     grid_right_edges = domain_right_edge
@@ -1680,6 +1696,9 @@
     _field_info_class = StreamFieldInfo
     _dataset_type = "stream_unstructured"
 
+    def _find_particle_types(self):
+        pass
+
 def load_unstructured_mesh(connectivity, coordinates, node_data=None,
                            elem_data=None, length_unit=None, bbox=None,
                            sim_time=0.0, mass_unit=None, time_unit=None,

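One user-visible consequence of the update_data rewrite above: the units
keyword is gone, and units now travel with the arrays as (array, unit-string)
tuples that unitify_data unpacks per grid. A minimal sketch, assuming a
single-grid uniform-grid stream dataset ds whose grid shape matches the array:

    import numpy as np

    new_dens = np.random.random(size=(32, 32, 32))
    # One dict per grid; units ride along with the array
    ds.index.update_data([{"density": (new_dens, "g/cm**3")}])
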
diff -r ad734750c43236b14d3a19f4eff5389a63eb4524 -r c9a09a20ccc4204fb9f8fed008a38d45c86b3300 yt/frontends/stream/fields.py
--- a/yt/frontends/stream/fields.py
+++ b/yt/frontends/stream/fields.py
@@ -74,8 +74,11 @@
         from yt.fields.magnetic_field import \
             setup_magnetic_field_aliases
         for field in self.ds.stream_handler.field_units:
+            if field[0] in self.ds.particle_types:
+                continue
             units = self.ds.stream_handler.field_units[field]
-            if units != '': self.add_output_field(field, sampling_type="cell", units=units)
+            if units != '': 
+                self.add_output_field(field, sampling_type="cell", units=units)
         setup_magnetic_field_aliases(self, "stream", ["magnetic_field_%s" % ax for ax in "xyz"])
 
     def add_output_field(self, name, sampling_type, **kwargs):

diff -r ad734750c43236b14d3a19f4eff5389a63eb4524 -r c9a09a20ccc4204fb9f8fed008a38d45c86b3300 yt/frontends/stream/tests/test_stream_particles.py
--- a/yt/frontends/stream/tests/test_stream_particles.py
+++ b/yt/frontends/stream/tests/test_stream_particles.py
@@ -2,7 +2,10 @@
 
 from yt.testing import \
     assert_equal
-from yt.frontends.stream.api import load_uniform_grid, refine_amr, load_amr_grids
+from yt.frontends.stream.api import load_uniform_grid, \
+    refine_amr, \
+    load_amr_grids, \
+    load_particles
 import yt.utilities.initial_conditions as ic
 import yt.utilities.flagging_methods as fm
 
@@ -15,7 +18,7 @@
     x = np.random.uniform(size=num_particles)
     y = np.random.uniform(size=num_particles)
     z = np.random.uniform(size=num_particles)
-    m = np.ones((num_particles))
+    m = np.ones(num_particles)
 
     # Field operators and cell flagging methods
 
@@ -31,13 +34,13 @@
 
     grid_data = []
 
-    for grid in amr0.index.grids :
+    for grid in amr0.index.grids:
 
-        data = dict(left_edge = grid.LeftEdge,
-                    right_edge = grid.RightEdge,
-                    level = grid.Level,
-                    dimensions = grid.ActiveDimensions,
-                    number_of_particles = grid.NumberOfParticles)
+        data = dict(left_edge=grid.LeftEdge,
+                    right_edge=grid.RightEdge,
+                    level=grid.Level,
+                    dimensions=grid.ActiveDimensions,
+                    number_of_particles=grid.NumberOfParticles)
 
         for field in amr0.field_list:
             data[field] = grid[field]
@@ -67,6 +70,12 @@
     assert_equal(number_of_particles1, num_particles)
     assert_equal(number_of_particles1, number_of_particles2)
 
+    for grid in ug2.index.grids:
+        tot_parts = grid["io","particle_position_x"].size
+        tot_all_parts = grid["all","particle_position_x"].size
+        assert tot_parts == grid.NumberOfParticles
+        assert tot_all_parts == grid.NumberOfParticles
+
     # Check to make sure the fields have been defined correctly
 
     for ptype in ("all", "io"):
@@ -87,20 +96,19 @@
 
     amr1 = refine_amr(ug1, rc, fo, 3)
     for field in sorted(ug1.field_list):
-        assert_equal((field in amr1.field_list), True)
+        assert field in amr1.field_list
 
     grid_data = []
 
     for grid in amr1.index.grids:
 
-        data = dict(left_edge = grid.LeftEdge,
-                    right_edge = grid.RightEdge,
-                    level = grid.Level,
-                    dimensions = grid.ActiveDimensions,
-                    number_of_particles = grid.NumberOfParticles)
+        data = dict(left_edge=grid.LeftEdge,
+                    right_edge=grid.RightEdge,
+                    level=grid.Level,
+                    dimensions=grid.ActiveDimensions,
+                    number_of_particles=grid.NumberOfParticles)
 
         for field in amr1.field_list:
-
             data[field] = grid[field]
 
         grid_data.append(data)
@@ -115,6 +123,18 @@
     assert_equal(np.sum(number_of_particles1), num_particles)
     assert_equal(number_of_particles1, number_of_particles2)
 
+    for grid in amr1.index.grids:
+        tot_parts = grid["io","particle_position_x"].size
+        tot_all_parts = grid["all","particle_position_x"].size
+        assert tot_parts == grid.NumberOfParticles
+        assert tot_all_parts == grid.NumberOfParticles
+
+    for grid in amr2.index.grids:
+        tot_parts = grid["io","particle_position_x"].size
+        tot_all_parts = grid["all","particle_position_x"].size
+        assert tot_parts == grid.NumberOfParticles
+        assert tot_all_parts == grid.NumberOfParticles
+
     assert amr1._get_field_info("all", "particle_position_x").particle_type
     assert amr1._get_field_info("all", "particle_position_y").particle_type
     assert amr1._get_field_info("all", "particle_position_z").particle_type
@@ -126,3 +146,164 @@
     assert amr2._get_field_info("all", "particle_position_z").particle_type
     assert amr2._get_field_info("all", "particle_mass").particle_type
     assert not amr2._get_field_info("gas", "density").particle_type
+
+    # Now perform similar checks, but with multiple particle types
+
+    num_dm_particles = 30000
+    xd = np.random.uniform(size=num_dm_particles)
+    yd = np.random.uniform(size=num_dm_particles)
+    zd = np.random.uniform(size=num_dm_particles)
+    md = np.ones(num_dm_particles)
+
+    num_star_particles = 20000
+    xs = np.random.uniform(size=num_star_particles)
+    ys = np.random.uniform(size=num_star_particles)
+    zs = np.random.uniform(size=num_star_particles)
+    ms = 2.0*np.ones(num_star_particles)
+
+    dens = np.random.random(domain_dims)
+
+    fields3 = {"density": dens,
+               ("dm", "particle_position_x"): xd,
+               ("dm", "particle_position_y"): yd,
+               ("dm", "particle_position_z"): zd,
+               ("dm", "particle_mass"): md,
+               ("star", "particle_position_x"): xs,
+               ("star", "particle_position_y"): ys,
+               ("star", "particle_position_z"): zs,
+               ("star", "particle_mass"): ms,
+               "number_of_particles": num_dm_particles+num_star_particles}
+
+    fields4 = fields3.copy()
+
+    ug3 = load_uniform_grid(fields3, domain_dims, 1.0)
+    ug4 = load_uniform_grid(fields4, domain_dims, 1.0, nprocs=8)
+
+    # Check to make sure the number of particles is the same
+
+    number_of_particles3 = np.sum([grid.NumberOfParticles for grid in ug3.index.grids])
+    number_of_particles4 = np.sum([grid.NumberOfParticles for grid in ug4.index.grids])
+
+    assert_equal(number_of_particles3, num_dm_particles+num_star_particles)
+    assert_equal(number_of_particles3, number_of_particles4)
+
+    for grid in ug4.index.grids:
+        tot_parts = grid["dm","particle_position_x"].size
+        tot_parts += grid["star","particle_position_x"].size
+        tot_all_parts = grid["all","particle_position_x"].size
+        assert tot_parts == grid.NumberOfParticles
+        assert tot_all_parts == grid.NumberOfParticles
+
+    # Check to make sure the fields have been defined correctly
+
+    for ptype in ("dm", "star"):
+        assert ug3._get_field_info(ptype, "particle_position_x").particle_type
+        assert ug3._get_field_info(ptype, "particle_position_y").particle_type
+        assert ug3._get_field_info(ptype, "particle_position_z").particle_type
+        assert ug3._get_field_info(ptype, "particle_mass").particle_type
+        assert ug4._get_field_info(ptype, "particle_position_x").particle_type
+        assert ug4._get_field_info(ptype, "particle_position_y").particle_type
+        assert ug4._get_field_info(ptype, "particle_position_z").particle_type
+        assert ug4._get_field_info(ptype, "particle_mass").particle_type
+
+    # Now refine this
+
+    amr3 = refine_amr(ug3, rc, fo, 3)
+    for field in sorted(ug3.field_list):
+        assert field in amr3.field_list
+
+    grid_data = []
+
+    for grid in amr3.index.grids:
+
+        data = dict(left_edge=grid.LeftEdge,
+                    right_edge=grid.RightEdge,
+                    level=grid.Level,
+                    dimensions=grid.ActiveDimensions,
+                    number_of_particles=grid.NumberOfParticles)
+
+        for field in amr3.field_list:
+            data[field] = grid[field]
+
+        grid_data.append(data)
+
+    amr4 = load_amr_grids(grid_data, domain_dims)
+
+    # Check everything again
+
+    number_of_particles3 = [grid.NumberOfParticles for grid in amr3.index.grids]
+    number_of_particles4 = [grid.NumberOfParticles for grid in amr4.index.grids]
+
+    assert_equal(np.sum(number_of_particles3), num_star_particles+num_dm_particles)
+    assert_equal(number_of_particles3, number_of_particles4)
+
+    for ptype in ("dm", "star"):
+        assert amr3._get_field_info(ptype, "particle_position_x").particle_type
+        assert amr3._get_field_info(ptype, "particle_position_y").particle_type
+        assert amr3._get_field_info(ptype, "particle_position_z").particle_type
+        assert amr3._get_field_info(ptype, "particle_mass").particle_type
+        assert amr4._get_field_info(ptype, "particle_position_x").particle_type
+        assert amr4._get_field_info(ptype, "particle_position_y").particle_type
+        assert amr4._get_field_info(ptype, "particle_position_z").particle_type
+        assert amr4._get_field_info(ptype, "particle_mass").particle_type
+
+    for grid in amr3.index.grids:
+        tot_parts = grid["dm","particle_position_x"].size
+        tot_parts += grid["star","particle_position_x"].size
+        tot_all_parts = grid["all","particle_position_x"].size
+        assert tot_parts == grid.NumberOfParticles
+        assert tot_all_parts == grid.NumberOfParticles
+
+    for grid in amr4.index.grids:
+        tot_parts = grid["dm", "particle_position_x"].size
+        tot_parts += grid["star", "particle_position_x"].size
+        tot_all_parts = grid["all", "particle_position_x"].size
+        assert tot_parts == grid.NumberOfParticles
+        assert tot_all_parts == grid.NumberOfParticles
+
+def test_load_particles_types():
+
+    num_particles = 10000
+
+    data1 = {"particle_position_x": np.random.random(size=num_particles),
+             "particle_position_y": np.random.random(size=num_particles),
+             "particle_position_z": np.random.random(size=num_particles),
+             "particle_mass": np.ones(num_particles)}
+
+    ds1 = load_particles(data1)
+    ds1.index
+
+    assert set(ds1.particle_types) == {"all", "io"}
+
+    dd = ds1.all_data()
+
+    for ax in "xyz":
+        assert dd["io", "particle_position_%s" % ax].size == num_particles
+        assert dd["all", "particle_position_%s" % ax].size == num_particles
+
+    num_dm_particles = 10000
+    num_star_particles = 50000
+    num_tot_particles = num_dm_particles + num_star_particles
+
+    data2 = {("dm", "particle_position_x"): np.random.random(size=num_dm_particles),
+             ("dm", "particle_position_y"): np.random.random(size=num_dm_particles),
+             ("dm", "particle_position_z"): np.random.random(size=num_dm_particles),
+             ("dm", "particle_mass"): np.ones(num_dm_particles),
+             ("star", "particle_position_x"): np.random.random(size=num_star_particles),
+             ("star", "particle_position_y"): np.random.random(size=num_star_particles),
+             ("star", "particle_position_z"): np.random.random(size=num_star_particles),
+             ("star", "particle_mass"): 2.0*np.ones(num_star_particles)}
+
+    ds2 = load_particles(data2)
+    ds2.index
+
+    assert set(ds2.particle_types) == {"all", "star", "dm"}
+
+    dd = ds2.all_data()
+
+    for ax in "xyz":
+        npart = 0
+        for ptype in ds2.particle_types_raw:
+            npart += dd[ptype, "particle_position_%s" % ax].size
+        assert npart == num_tot_particles
+        assert dd["all", "particle_position_%s" % ax].size == num_tot_particles
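+
+# The convention exercised above: plain string keys in the data dict are
+# assigned to the "io" particle type, (ptype, name) tuples create named
+# particle types, and the "all" type aggregates every type. A minimal
+# standalone sketch of the same behavior (an illustrative script, not
+# part of this changeset):
+#
+#     import numpy as np
+#     from yt import load_particles
+#
+#     n = 100
+#     data = {("dm", "particle_position_x"): np.random.random(n),
+#             ("dm", "particle_position_y"): np.random.random(n),
+#             ("dm", "particle_position_z"): np.random.random(n),
+#             ("dm", "particle_mass"): np.ones(n)}
+#     ds = load_particles(data)
+#     ds.index
+#     ad = ds.all_data()
+#     # both the named type and the aggregate "all" type resolve the field
+#     assert ad["dm", "particle_mass"].size == n
+#     assert ad["all", "particle_mass"].size == n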

diff -r ad734750c43236b14d3a19f4eff5389a63eb4524 -r c9a09a20ccc4204fb9f8fed008a38d45c86b3300 yt/frontends/stream/tests/test_update_data.py
--- a/yt/frontends/stream/tests/test_update_data.py
+++ b/yt/frontends/stream/tests/test_update_data.py
@@ -2,15 +2,15 @@
 from yt.data_objects.profiles import create_profile
 from numpy.random import uniform
 
-def test_update_data() :
+def test_update_data():
     ds = fake_random_ds(64, nprocs=8)
     ds.index
     dims = (32,32,32)
-    grid_data = [{"temperature":uniform(size=dims)}
+    grid_data = [{"temperature": uniform(size=dims)}
                  for i in range(ds.index.num_grids)]
-    ds.index.update_data(grid_data, {('gas', 'temperature'):'K'})
+    ds.index.update_data(grid_data)
     prj = ds.proj("temperature", 2)
     prj["temperature"]
     dd = ds.all_data()
     profile = create_profile(dd, "density", "temperature", 10)
-    profile["temperature"]                             
+    profile["temperature"]

diff -r ad734750c43236b14d3a19f4eff5389a63eb4524 -r c9a09a20ccc4204fb9f8fed008a38d45c86b3300 yt/utilities/particle_generator.py
--- a/yt/utilities/particle_generator.py
+++ b/yt/utilities/particle_generator.py
@@ -7,28 +7,28 @@
 
 class ParticleGenerator(object):
 
-    default_fields = [("io", "particle_position_x"),
-                      ("io", "particle_position_y"),
-                      ("io", "particle_position_z")]
-    
-    def __init__(self, ds, num_particles, field_list):
+    def __init__(self, ds, num_particles, field_list, ptype="io"):
         """
         Base class for generating particle fields which may be applied to
         streams. Normally this would not be called directly, since it doesn't
         really do anything except allocate memory. Takes a *ds* to serve as the
         basis for determining grids, the number of particles *num_particles*,
-        and a list of fields, *field_list*.
+        a list of fields, *field_list*, and the particle type *ptype*, which
+        has a default value of "io". 
         """
         self.ds = ds
         self.num_particles = num_particles
-        self.field_list = [("io",fd) if isinstance(fd,string_types) else fd
+        self.field_list = [(ptype, fd) if isinstance(fd, string_types) else fd
                            for fd in field_list]
-        self.field_list.append(("io", "particle_index"))
+        self.field_list.append((ptype, "particle_index"))
         self.field_units = dict(
-          (('io', 'particle_position_%s' % ax), 'code_length')
+          ((ptype, 'particle_position_%s' % ax), 'code_length')
           for ax in 'xyz')
-        self.field_units['io', 'particle_index'] = ''
-        
+        self.field_units[ptype, 'particle_index'] = ''
+        self.ptype = ptype
+
+        self._set_default_fields()
+
         try:
             self.posx_index = self.field_list.index(self.default_fields[0])
             self.posy_index = self.field_list.index(self.default_fields[1])
@@ -36,35 +36,40 @@
         except:
             raise KeyError("You must specify position fields: " +
                            " ".join(["particle_position_%s" % ax for ax in "xyz"]))
-        self.index_index = self.field_list.index(("io", "particle_index"))
-        
+        self.index_index = self.field_list.index((ptype, "particle_index"))
+
         self.num_grids = self.ds.index.num_grids
-        self.NumberOfParticles = np.zeros((self.num_grids), dtype='int64')
+        self.NumberOfParticles = np.zeros(self.num_grids, dtype='int64')
         self.ParticleGridIndices = np.zeros(self.num_grids + 1, dtype='int64')
-        
+
         self.num_fields = len(self.field_list)
-        
+
         self.particles = np.zeros((self.num_particles, self.num_fields),
                                   dtype='float64')
 
+    def _set_default_fields(self):
+        self.default_fields = [(self.ptype, "particle_position_x"),
+                               (self.ptype, "particle_position_y"),
+                               (self.ptype, "particle_position_z")]
+
     def has_key(self, key):
         """
         Check to see if *key* is in the particle field list.
         """
         return key in self.field_list
-            
+
     def keys(self):
         """
         Return the list of particle fields.
         """
         return self.field_list
-    
+
     def __getitem__(self, key):
         """
         Get the field associated with key.
         """
         return self.particles[:,self.field_list.index(key)]
-    
+
     def __setitem__(self, key, val):
         """
         Sets a field to be some other value. Note that we assume
@@ -72,7 +77,7 @@
         make sure the setting of the field is consistent with this.
         """
         self.particles[:,self.field_list.index(key)] = val[:]
-                
+
     def __len__(self):
         """
         The number of particles
@@ -95,17 +100,17 @@
             else:
                 tr[field] = self.particles[start:end, fi]
         return tr
-    
-    def _setup_particles(self,x,y,z,setup_fields=None):
+
+    def _setup_particles(self, x, y, z, setup_fields=None):
         """
         Assigns grids to particles and sets up particle positions. *setup_fields* is
         a dict of fields other than the particle positions to set up. 
         """
-        particle_grids, particle_grid_inds = self.ds.index._find_points(x,y,z)
+        particle_grids, particle_grid_inds = self.ds.index._find_points(x, y, z)
         idxs = np.argsort(particle_grid_inds)
-        self.particles[:,self.posx_index] = x[idxs]
-        self.particles[:,self.posy_index] = y[idxs]
-        self.particles[:,self.posz_index] = z[idxs]
+        self.particles[:, self.posx_index] = x[idxs]
+        self.particles[:, self.posy_index] = y[idxs]
+        self.particles[:, self.posz_index] = z[idxs]
         self.NumberOfParticles = np.bincount(particle_grid_inds.astype("intp"),
                                              minlength=self.num_grids)
         if self.num_grids > 1:
@@ -115,20 +120,20 @@
             self.ParticleGridIndices[1] = self.NumberOfParticles.squeeze()
         if setup_fields is not None:
             for key, value in setup_fields.items():
-                field = ("io",key) if isinstance(key, string_types) else key
+                field = (self.ptype, key) if isinstance(key, string_types) else key
                 if field not in self.default_fields:
                     self.particles[:,self.field_list.index(field)] = value[idxs]
-    
+
     def assign_indices(self, function=None, **kwargs):
         """
         Assign unique indices to the particles. The default is to just use
         numpy.arange, but any function may be supplied with keyword arguments.
         """
-        if function is None :
-            self.particles[:,self.index_index] = np.arange((self.num_particles))
-        else :
-            self.particles[:,self.index_index] = function(**kwargs)
-            
+        if function is None:
+            self.particles[:, self.index_index] = np.arange(self.num_particles)
+        else:
+            self.particles[:, self.index_index] = function(**kwargs)
+
     def map_grid_fields_to_particles(self, mapping_dict):
         r"""
         For the fields in  *mapping_dict*, map grid fields to the particles
@@ -164,36 +169,30 @@
 
     def apply_to_stream(self, clobber=False):
         """
-        Apply the particles to a stream dataset. If particles already exist,
-        and clobber=False, do not overwrite them, but add the new ones to them. 
+        Apply the particles to a grid-based stream dataset. If particles 
+        already exist, and clobber=False, do not overwrite them, but add 
+        the new ones to them.
         """
         grid_data = []
-        for i,g in enumerate(self.ds.index.grids):
+        for i, g in enumerate(self.ds.index.grids):
             data = {}
-            if clobber :
+            if clobber:
                 data["number_of_particles"] = self.NumberOfParticles[i]
-            else :
+            else:
                 data["number_of_particles"] = self.NumberOfParticles[i] + \
                                               g.NumberOfParticles
             grid_particles = self.get_for_grid(g)
-            for field in self.field_list :
+            for field in self.field_list:
                 if data["number_of_particles"] > 0:
-                    # We have particles in this grid
-                    if g.NumberOfParticles > 0 and not clobber:
-                        # Particles already exist
-                        if field in self.ds.field_list:
-                            # This field already exists
-                            prev_particles = g[field]
-                        else:
-                            # This one doesn't, set the previous particles' field
-                            # values to zero
-                            prev_particles = np.zeros((g.NumberOfParticles))
-                            prev_particles = self.ds.arr(prev_particles,
-                                input_units = self.field_units[field])
-                        data[field] = uconcatenate((prev_particles,
-                                                    grid_particles[field]))
+                    if g.NumberOfParticles > 0 and not clobber and \
+                        field in self.ds.field_list:
+                        # We have particles in this grid, we're not
+                        # overwriting them, and the field is in the field
+                        # list already
+                        data[field] = uconcatenate([g[field],
+                                                    grid_particles[field]])
                     else:
-                        # Particles do not already exist or we're clobbering
+                        # Otherwise, simply add the field in
                         data[field] = grid_particles[field]
                 else:
                     # We don't have particles in this grid
@@ -203,7 +202,7 @@
 
 class FromListParticleGenerator(ParticleGenerator):
 
-    def __init__(self, ds, num_particles, data):
+    def __init__(self, ds, num_particles, data, ptype="io"):
         r"""
         Generate particle fields from array-like lists contained in a dict.
 
@@ -215,6 +214,8 @@
             The number of particles in the dict.
         data : dict of NumPy arrays
             The particle fields themselves.
+        ptype : string, optional
+            The particle type for these particle fields. Default: "io"
 
         Examples
         --------
@@ -233,10 +234,10 @@
             x = data.pop("particle_position_x")
             y = data.pop("particle_position_y")
             z = data.pop("particle_position_z")
-        elif ("io","particle_position_x") in data:
-            x = data.pop(("io", "particle_position_x"))
-            y = data.pop(("io", "particle_position_y"))
-            z = data.pop(("io", "particle_position_z"))
+        elif (ptype,"particle_position_x") in data:
+            x = data.pop((ptype, "particle_position_x"))
+            y = data.pop((ptype, "particle_position_y"))
+            z = data.pop((ptype, "particle_position_z"))
 
         xcond = np.logical_or(x < ds.domain_left_edge[0],
                               x >= ds.domain_right_edge[0])
@@ -250,13 +251,14 @@
         if np.any(cond):
             raise ValueError("Some particles are outside of the domain!!!")
 
-        ParticleGenerator.__init__(self, ds, num_particles, field_list)
-        self._setup_particles(x,y,z,setup_fields=data)
-        
+        super(FromListParticleGenerator, self).__init__(ds, num_particles, 
+                                                        field_list, ptype=ptype)
+        self._setup_particles(x, y, z, setup_fields=data)
+
 class LatticeParticleGenerator(ParticleGenerator):
 
     def __init__(self, ds, particles_dims, particles_left_edge,
-                 particles_right_edge, field_list):
+                 particles_right_edge, field_list, ptype="io"):
         r"""
         Generate particles in a lattice arrangement. 
 
@@ -272,7 +274,9 @@
              The 'right-most' ending positions of the lattice.
         field_list : list of strings
              A list of particle fields
-             
+        ptype : string, optional
+            The particle type for these particle fields. Default: "io"
+
         Examples
         --------
         >>> dims = (128,128,128)
@@ -293,33 +297,34 @@
         xmax = particles_right_edge[0]
         ymax = particles_right_edge[1]
         zmax = particles_right_edge[2]
-        DLE = ds.domain_left_edge.in_units("code_length").ndarray_view()
-        DRE = ds.domain_right_edge.in_units("code_length").ndarray_view()
+        DLE = ds.domain_left_edge.in_units("code_length").d
+        DRE = ds.domain_right_edge.in_units("code_length").d
 
         xcond = (xmin < DLE[0]) or (xmax >= DRE[0])
         ycond = (ymin < DLE[1]) or (ymax >= DRE[1])
         zcond = (zmin < DLE[2]) or (zmax >= DRE[2])
         cond = xcond or ycond or zcond
 
-        if cond :
+        if cond:
             raise ValueError("Proposed bounds for particles are outside domain!!!")
 
-        ParticleGenerator.__init__(self, ds, num_x*num_y*num_z, field_list)
+        super(LatticeParticleGenerator, self).__init__(ds, num_x*num_y*num_z, 
+                                                       field_list, ptype=ptype)
 
         dx = (xmax-xmin)/(num_x-1)
         dy = (ymax-ymin)/(num_y-1)
         dz = (zmax-zmin)/(num_z-1)
-        inds = np.indices((num_x,num_y,num_z))
+        inds = np.indices((num_x, num_y, num_z))
         xpos = inds[0]*dx + xmin
         ypos = inds[1]*dy + ymin
         zpos = inds[2]*dz + zmin
-        
+
         self._setup_particles(xpos.flat[:], ypos.flat[:], zpos.flat[:])
-        
+
 class WithDensityParticleGenerator(ParticleGenerator):
 
     def __init__(self, ds, data_source, num_particles, field_list,
-                 density_field="density"):
+                 density_field="density", ptype="io"):
         r"""
         Generate particles based on a density field.
 
@@ -336,7 +341,9 @@
         density_field : string, optional
             A density field which will serve as the distribution function for the
             particle positions. Theoretically, this could be any 'per-volume' field. 
-            
+        ptype : string, optional
+            The particle type for these particle fields. Default: "io"
+
         Examples
         --------
         >>> sphere = ds.sphere(ds.domain_center, 0.5)
@@ -348,7 +355,8 @@
         >>>                                          fields, density_field='Dark_Matter_Density')
         """
 
-        ParticleGenerator.__init__(self, ds, num_particles, field_list)
+        super(WithDensityParticleGenerator, self).__init__(ds, num_particles,
+                                                           field_list, ptype=ptype)
 
         num_cells = len(data_source["x"].flat)
         max_mass = (data_source[density_field]*
@@ -357,22 +365,22 @@
         all_x = []
         all_y = []
         all_z = []
-        
+
         pbar = get_pbar("Generating Particles", num_particles)
         tot_num_accepted = int(0)
-        
+
         while num_particles_left > 0:
 
             m = np.random.uniform(high=1.01*max_mass,
                                   size=num_particles_left)
             idxs = np.random.random_integers(low=0, high=num_cells-1,
                                              size=num_particles_left)
-            m_true = (data_source[density_field]*
+            m_true = (data_source[density_field] *
                       data_source["cell_volume"]).flat[idxs]
             accept = m <= m_true
             num_accepted = accept.sum()
             accepted_idxs = idxs[accept]
-            
+
             xpos = data_source["x"].flat[accepted_idxs] + \
                    np.random.uniform(low=-0.5, high=0.5, size=num_accepted) * \
                    data_source["dx"].flat[accepted_idxs]
@@ -397,5 +405,5 @@
         y = uconcatenate(all_y)
         z = uconcatenate(all_z)
 
-        self._setup_particles(x,y,z)
-        
+        self._setup_particles(x, y, z)
+

diff -r ad734750c43236b14d3a19f4eff5389a63eb4524 -r c9a09a20ccc4204fb9f8fed008a38d45c86b3300 yt/utilities/tests/test_particle_generator.py
--- a/yt/utilities/tests/test_particle_generator.py
+++ b/yt/utilities/tests/test_particle_generator.py
@@ -30,14 +30,14 @@
                   ("io", "particle_position_z"),
                   ("io", "particle_index"),
                   ("io", "particle_gas_density")]
-    num_particles = 1000000
+    num_particles = 10000
     field_dict = {("gas", "density"): ("io", "particle_gas_density")}
     sphere = ds.sphere(ds.domain_center, 0.45)
 
     particles1 = WithDensityParticleGenerator(ds, sphere, num_particles, field_list)
     particles1.assign_indices()
     particles1.map_grid_fields_to_particles(field_dict)
-    
+
     # Test to make sure we ended up with the right number of particles per grid
     particles1.apply_to_stream()
     particles_per_grid1 = [grid.NumberOfParticles for grid in ds.index.grids]
@@ -49,23 +49,20 @@
     assert(np.unique(tags).size == num_particles)
 
     del tags
-    
+
     # Set up a lattice of particles
     pdims = np.array([32,32,32])
-    def new_indices() :
+    def new_indices():
         # We just add new indices onto the existing ones
         return np.arange((np.product(pdims)))+num_particles
     le = np.array([0.25,0.25,0.25])
     re = np.array([0.75,0.75,0.75])
-    new_field_list = field_list + [("io", "particle_gas_temperature")]
-    new_field_dict = {("gas", "density"): ("io", "particle_gas_density"),
-                      ("gas", "temperature"): ("io", "particle_gas_temperature")}
 
-    particles2 = LatticeParticleGenerator(ds, pdims, le, re, new_field_list)
+    particles2 = LatticeParticleGenerator(ds, pdims, le, re, field_list)
     particles2.assign_indices(function=new_indices)
-    particles2.map_grid_fields_to_particles(new_field_dict)
+    particles2.map_grid_fields_to_particles(field_dict)
 
-    #Test lattice positions
+    # Test lattice positions
     xpos = np.unique(particles2["io", "particle_position_x"])
     ypos = np.unique(particles2["io", "particle_position_y"])
     zpos = np.unique(particles2["io", "particle_position_z"])
@@ -74,14 +71,14 @@
     ypred = np.linspace(le[1],re[1],num=pdims[1],endpoint=True)
     zpred = np.linspace(le[2],re[2],num=pdims[2],endpoint=True)
 
-    assert_almost_equal( xpos, xpred)
-    assert_almost_equal( ypos, ypred)
-    assert_almost_equal( zpos, zpred)
+    assert_almost_equal(xpos, xpred)
+    assert_almost_equal(ypos, ypred)
+    assert_almost_equal(zpos, zpred)
 
     del xpos, ypos, zpos
     del xpred, ypred, zpred
 
-    #Test the number of particles again
+    # Test the number of particles again
     particles2.apply_to_stream()
     particles_per_grid2 = [grid.NumberOfParticles for grid in ds.index.grids]
     assert_equal(particles_per_grid2, particles1.NumberOfParticles+particles2.NumberOfParticles)
@@ -90,43 +87,54 @@
     particles_per_grid2 = [len(grid["particle_position_x"]) for grid in ds.index.grids]
     assert_equal(particles_per_grid2, particles1.NumberOfParticles+particles2.NumberOfParticles)
 
-    #Test the uniqueness of tags
+    # Test the uniqueness of tags
     tags = np.concatenate([grid["particle_index"] for grid in ds.index.grids])
     tags.sort()
     assert_equal(tags, np.arange((np.product(pdims)+num_particles)))
 
     del tags
 
-    # Test that the old particles have zero for the new field
-    old_particle_temps = [grid["particle_gas_temperature"][:particles_per_grid1[i]]
-                          for i, grid in enumerate(ds.index.grids)]
-    test_zeros = [np.zeros((particles_per_grid1[i])) 
-                  for i, grid in enumerate(ds.index.grids)]
-    assert_equal(old_particle_temps, test_zeros)
-
-    del old_particle_temps
-    del test_zeros
-
-    #Now dump all of these particle fields out into a dict
+    # Now dump all of these particle fields out into a dict
     pdata = {}
     dd = ds.all_data()
-    for field in new_field_list :
+    for field in field_list:
         pdata[field] = dd[field]
 
-    #Test the "from-list" generator and particle field clobber
-    particles3 = FromListParticleGenerator(ds, num_particles+np.product(pdims), pdata)
+    # Test the "from-list" generator and particle field clobber
+    num_particles3 = num_particles+np.product(pdims)
+    particles3 = FromListParticleGenerator(ds, num_particles3, pdata)
     particles3.apply_to_stream(clobber=True)
-    
-    #Test the number of particles again
+
+    # Test the number of particles again
     particles_per_grid3 = [grid.NumberOfParticles for grid in ds.index.grids]
     assert_equal(particles_per_grid3, particles1.NumberOfParticles+particles2.NumberOfParticles)
     particles_per_grid2 = [len(grid["particle_position_z"]) for grid in ds.index.grids]
     assert_equal(particles_per_grid3, particles1.NumberOfParticles+particles2.NumberOfParticles)
+    assert_equal(particles_per_grid2, particles_per_grid3)
+
+    # Test adding in particles with a different particle type
+
+    num_star_particles = 20000
+    pdata2 = {("star", "particle_position_x"): np.random.uniform(size=num_star_particles),
+              ("star", "particle_position_y"): np.random.uniform(size=num_star_particles),
+              ("star", "particle_position_z"): np.random.uniform(size=num_star_particles)}
+
+    particles4 = FromListParticleGenerator(ds, num_star_particles, pdata2, ptype="star")
+    particles4.apply_to_stream()
+
+    dd = ds.all_data()
+    assert dd["star", "particle_position_x"].size == num_star_particles
+    assert dd["io", "particle_position_x"].size == num_particles3
+    assert dd["all", "particle_position_x"].size == num_star_particles+num_particles3
+
 
     del pdata
+    del pdata2
     del ds
     del particles1
     del particles2
+    del particles4
     del fields
     del dens
     del temp
+


https://bitbucket.org/yt_analysis/yt/commits/ed15fffe38b6/
Changeset:   ed15fffe38b6
User:        ngoldbaum
Date:        2017-05-25 12:42:51+00:00
Summary:     fix I/O issue with little endian Gadget snap format 2 data
Affected #:  2 files

diff -r 24e049078e06a910fd24d8a24faf970196ae178c -r ed15fffe38b678613825e89e8b2259034441f62b yt/frontends/gadget/data_structures.py
--- a/yt/frontends/gadget/data_structures.py
+++ b/yt/frontends/gadget/data_structures.py
@@ -353,11 +353,10 @@
             endianswap = '<'
         elif rhead == 65536:
             endianswap = '>'
-        # Enabled Format2 here
-        elif rhead == 8:
-            f.close()
-            return True, 'f8'
-        elif rhead == 134217728:
+        elif rhead in (8, 134217728):
+            # This is only true for snapshot format 2
+            # we do not currently support double precision
+            # snap format 2 data
             f.close()
             return True, 'f4'
         else:
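
For context, the values compared against rhead above are the file's leading
4-byte Fortran record marker: 256 opens the header block of a snapshot
format 1 file and 8 opens the block-name record of a snapshot format 2 file,
while 65536 and 134217728 are those same markers byteswapped, signalling the
opposite endianness. A standalone sketch of the detection logic (a
hypothetical helper, not yt's actual _is_valid implementation):

    import struct

    def probe_gadget_snapshot(filename):
        # read the first Fortran record marker as a little-endian uint32
        with open(filename, "rb") as f:
            (rhead,) = struct.unpack("<I", f.read(4))
        if rhead == 256:               # snapshot format 1, little endian
            return "format 1", "<"
        elif rhead == 65536:           # 256 byteswapped: format 1, big endian
            return "format 1", ">"
        elif rhead in (8, 134217728):  # 8 or its byteswap: snapshot format 2
            return "format 2", "<" if rhead == 8 else ">"
        raise IOError("%s is not a recognizable Gadget binary snapshot"
                      % filename)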

diff -r 24e049078e06a910fd24d8a24faf970196ae178c -r ed15fffe38b678613825e89e8b2259034441f62b yt/frontends/gadget/tests/test_outputs.py
--- a/yt/frontends/gadget/tests/test_outputs.py
+++ b/yt/frontends/gadget/tests/test_outputs.py
@@ -26,6 +26,7 @@
 isothermal_h5 = "IsothermalCollapse/snap_505.hdf5"
 isothermal_bin = "IsothermalCollapse/snap_505"
 BE_Gadget = "BigEndianGadgetBinary/BigEndianGadgetBinary"
+LE_SnapFormat2 = "Gadget3-snap-format2/Gadget3-snap-format2"
 
 # This maps from field names to weight field names to use for projections
 iso_fields = OrderedDict(
@@ -46,13 +47,14 @@
 @requires_file(isothermal_h5)
 @requires_file(isothermal_bin)
 @requires_file(BE_Gadget)
+@requires_file(LE_SnapFormat2)
 def test_GadgetDataset():
     assert isinstance(data_dir_load(isothermal_h5, kwargs=iso_kwargs),
                       GadgetHDF5Dataset)
     assert isinstance(data_dir_load(isothermal_bin, kwargs=iso_kwargs),
                       GadgetDataset)
-    assert isinstance(data_dir_load(BE_Gadget, kwargs=''),
-                      GadgetDataset)
+    assert isinstance(data_dir_load(BE_Gadget), GadgetDataset)
+    assert isinstance(data_dir_load(LE_SnapFormat2), GadgetDataset)
 
 
 @requires_ds(isothermal_h5)


https://bitbucket.org/yt_analysis/yt/commits/46b86fe1b681/
Changeset:   46b86fe1b681
User:        MatthewTurk
Date:        2017-05-25 21:18:27+00:00
Summary:     Merge pull request #1425 from ngoldbaum/gadget-snap2-fixes

fix I/O issue with little endian Gadget snap format 2 data
Affected #:  2 files

diff -r c9a09a20ccc4204fb9f8fed008a38d45c86b3300 -r 46b86fe1b681c4deee58e4d9850df853b4d86ec6 yt/frontends/gadget/data_structures.py
--- a/yt/frontends/gadget/data_structures.py
+++ b/yt/frontends/gadget/data_structures.py
@@ -353,11 +353,10 @@
             endianswap = '<'
         elif rhead == 65536:
             endianswap = '>'
-        # Enabled Format2 here
-        elif rhead == 8:
-            f.close()
-            return True, 'f8'
-        elif rhead == 134217728:
+        elif rhead in (8, 134217728):
+            # This is only true for snapshot format 2
+            # we do not currently support double precision
+            # snap format 2 data
             f.close()
             return True, 'f4'
         else:

diff -r c9a09a20ccc4204fb9f8fed008a38d45c86b3300 -r 46b86fe1b681c4deee58e4d9850df853b4d86ec6 yt/frontends/gadget/tests/test_outputs.py
--- a/yt/frontends/gadget/tests/test_outputs.py
+++ b/yt/frontends/gadget/tests/test_outputs.py
@@ -26,6 +26,7 @@
 isothermal_h5 = "IsothermalCollapse/snap_505.hdf5"
 isothermal_bin = "IsothermalCollapse/snap_505"
 BE_Gadget = "BigEndianGadgetBinary/BigEndianGadgetBinary"
+LE_SnapFormat2 = "Gadget3-snap-format2/Gadget3-snap-format2"
 
 # This maps from field names to weight field names to use for projections
 iso_fields = OrderedDict(
@@ -46,13 +47,14 @@
 @requires_file(isothermal_h5)
 @requires_file(isothermal_bin)
 @requires_file(BE_Gadget)
+@requires_file(LE_SnapFormat2)
 def test_GadgetDataset():
     assert isinstance(data_dir_load(isothermal_h5, kwargs=iso_kwargs),
                       GadgetHDF5Dataset)
     assert isinstance(data_dir_load(isothermal_bin, kwargs=iso_kwargs),
                       GadgetDataset)
-    assert isinstance(data_dir_load(BE_Gadget, kwargs=''),
-                      GadgetDataset)
+    assert isinstance(data_dir_load(BE_Gadget), GadgetDataset)
+    assert isinstance(data_dir_load(LE_SnapFormat2), GadgetDataset)
 
 
 @requires_ds(isothermal_h5)

Repository URL: https://bitbucket.org/yt_analysis/yt/
