[yt-svn] commit/yt: 6 new changesets

Bitbucket commits-noreply at bitbucket.org
Fri Nov 30 04:07:47 PST 2012


6 new commits in yt:


https://bitbucket.org/yt_analysis/yt/changeset/22db916e3bc2/
changeset:   22db916e3bc2
branch:      yt
user:        jzuhone
date:        2012-11-27 20:16:38
summary:     Allow the grid data in the field handler to be updated directly
affected #:  1 file

diff -r 523b01bbce78c6c93538339fb901ad245dc60295 -r 22db916e3bc23adb1980b8c13c37eca2a0e6bf32 yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -65,6 +65,12 @@
         # In-place unit conversion requires we return a copy
         return tr.copy()
 
+    def update_data(self, grid, data) :
+
+        for key in data.keys() :
+            
+            self.fields[grid.id][key] = data[key]
+                        
     @property
     def _read_exception(self):
         return KeyError
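
For reference, the hook added here simply replaces or creates per-grid field arrays in the handler's backing dict. Below is a minimal self-contained sketch of that pattern; MiniFieldHandler is a hypothetical stand-in for the stream field handler, and only the update_data loop mirrors the patch above:

    import numpy as np

    class MiniFieldHandler(object):
        # Hypothetical stand-in: maps grid id -> dict of field name -> array.
        def __init__(self):
            self.fields = {}

        def update_data(self, grid_id, data):
            # Mirror of the patch: each key overwrites or creates that grid's field.
            self.fields.setdefault(grid_id, {})
            for key in data.keys():
                self.fields[grid_id][key] = data[key]

    handler = MiniFieldHandler()
    handler.update_data(0, {"Density": np.ones((8, 8, 8))})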



https://bitbucket.org/yt_analysis/yt/changeset/787cf887d824/
changeset:   787cf887d824
branch:      yt
user:        jzuhone
date:        2012-11-27 22:43:30
summary:     Moved update_data to StreamHierarchy. It now loops over all grids in the hierarchy.
affected #:  2 files

diff -r 22db916e3bc23adb1980b8c13c37eca2a0e6bf32 -r 787cf887d824ca8694bc52ee4448e8e6d38b022d yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -249,6 +249,20 @@
         else:
             self.io = io_registry[self.data_style](self.stream_handler)
 
+    def update_data(self, data) :
+
+        for key in data[0].keys() :
+
+            if key not in self.field_list: self.field_list.append(key)
+            
+        for i, grid in enumerate(self.grids) :
+            
+            for key in data[i].keys() :
+
+                self.stream_handler.fields[grid.id][key] = data[i][key]
+
+        self._detect_fields()
+        
 class StreamStaticOutput(StaticOutput):
     _hierarchy_class = StreamHierarchy
     _fieldinfo_fallback = StreamFieldInfo


diff -r 22db916e3bc23adb1980b8c13c37eca2a0e6bf32 -r 787cf887d824ca8694bc52ee4448e8e6d38b022d yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -65,12 +65,6 @@
         # In-place unit conversion requires we return a copy
         return tr.copy()
 
-    def update_data(self, grid, data) :
-
-        for key in data.keys() :
-            
-            self.fields[grid.id][key] = data[key]
-                        
     @property
     def _read_exception(self):
         return KeyError
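
A sketch of the hierarchy-level version of the same idea: data becomes a list of per-grid dicts, and new field names are merged into the field list before the per-grid update. The names below are illustrative stand-ins, not yt API:

    import numpy as np

    def update_all_grids(stream_fields, field_list, data):
        # data[i] holds the new fields for grid i, in hierarchy order.
        for key in data[0].keys():
            if key not in field_list:
                field_list.append(key)
        for gid, grid_data in enumerate(data):
            for key in grid_data.keys():
                stream_fields[gid][key] = grid_data[key]

    fields = {0: {}, 1: {}}
    flist = ["Density"]
    update_all_grids(fields, flist,
                     [{"Temperature": np.zeros((4, 4, 4))} for _ in range(2)])
    # flist is now ["Density", "Temperature"]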



https://bitbucket.org/yt_analysis/yt/changeset/592cf5fd7b89/
changeset:   592cf5fd7b89
branch:      yt
user:        jzuhone
date:        2012-11-28 18:02:07
summary:     Particles now work for all existing implementations of the stream frontend. Also added a couple of tests.
affected #:  3 files

diff -r 787cf887d824ca8694bc52ee4448e8e6d38b022d -r 592cf5fd7b89992a7641363399240fc09be3e406 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -105,7 +105,7 @@
 class StreamHandler(object):
     def __init__(self, left_edges, right_edges, dimensions,
                  levels, parent_ids, particle_count, processor_ids,
-                 fields, io = None):
+                 fields, io = None, particle_types = {}):
         self.left_edges = left_edges
         self.right_edges = right_edges
         self.dimensions = dimensions
@@ -116,10 +116,18 @@
         self.num_grids = self.levels.size
         self.fields = fields
         self.io = io
-
+        self.particle_types = particle_types
+            
     def get_fields(self):
         return self.fields.all_fields
 
+    def get_particle_type(self, field) :
+
+        if self.particle_types.has_key(field) :
+            return self.particle_types[field]
+        else :
+            return False
+        
 class StreamHierarchy(AMRHierarchy):
 
     grid = StreamGrid
@@ -150,11 +158,12 @@
                         return data.convert(f)
                     return _convert_function
                 cf = external_wrapper(field)
+            ptype = self.stream_handler.get_particle_type(field)
             # Note that we call add_field on the field_info directly.  This
             # will allow the same field detection mechanism to work for 1D, 2D
             # and 3D fields.
             self.pf.field_info.add_field(
-                    field, lambda a, b: None,
+                    field, lambda a, b: None, particle_type = ptype,
                     convert_function=cf, take_log=False)
 
     def _parse_hierarchy(self):
@@ -250,17 +259,30 @@
             self.io = io_registry[self.data_style](self.stream_handler)
 
     def update_data(self, data) :
+        
+        particle_types = set_particle_types(data[0])
 
         for key in data[0].keys() :
 
-            if key not in self.field_list: self.field_list.append(key)
+            if key is "number_of_particles": continue
             
+            self.stream_handler.particle_types[key] = particle_types[key]
+            
+            if key not in self.field_list:
+                self.field_list.append(key)
+
+        
         for i, grid in enumerate(self.grids) :
             
+            if data[i].has_key("number_of_particles") :
+
+                grid.NumberOfParticles = data[i].pop("number_of_particles")
+                
             for key in data[i].keys() :
 
                 self.stream_handler.fields[grid.id][key] = data[i][key]
-
+            
+        self._setup_unknown_fields()
         self._detect_fields()
         
 class StreamStaticOutput(StaticOutput):
@@ -313,8 +335,73 @@
     @property
     def all_fields(self): return self[0].keys()
 
+def set_particle_types(data) :
+
+    particle_types = {}
+    
+    for key in data.keys() :
+
+        if key is "number_of_particles": continue
+        
+        if len(data[key].shape) == 1:
+            particle_types[key] = True
+        else :
+            particle_types[key] = False
+    
+    return particle_types
+
+def assign_particle_data(pf, pdata) :
+    
+    if pf.h.num_grids > 1 :
+
+        try :
+            x = pdata["particle_position_x"]
+            y = pdata["particle_position_y"]
+            z = pdata["particle_position_z"]
+        except:
+            raise KeyError("Cannot decompose particle data without position fields!")
+        
+        particle_grids, particle_grid_inds = pf.h.find_points(x,y,z)
+        
+        idxs = np.argsort(particle_grid_inds)
+        
+        particle_grid_count = np.bincount(particle_grid_inds,
+                                          minlength=pf.h.num_grids)
+        particle_indices = np.zeros(pf.h.num_grids + 1, dtype='int64')
+        
+        if pf.h.num_grids > 1 :
+            np.add.accumulate(particle_grid_count.squeeze(),
+                              out=particle_indices[1:])
+        else :
+            particle_indices[1] = particle_grid_count.squeeze()
+            
+        pdata.pop("number_of_particles")
+        
+        grid_pdata = []
+        
+        for i, pcount in enumerate(particle_grid_count) :
+            
+            grid = {}
+            
+            grid["number_of_particles"] = pcount
+            
+            start = particle_indices[i]
+            end = particle_indices[i+1]
+            
+            for key in pdata.keys() :
+                
+                grid[key] = pdata[key][idxs][start:end]
+                
+            grid_pdata.append(grid)
+
+    else :
+
+        grid_pdata = [pdata]
+        
+    pf.h.update_data(grid_pdata)
+                                        
 def load_uniform_grid(data, domain_dimensions, sim_unit_to_cm, bbox=None,
-                      nprocs=1, sim_time=0.0, number_of_particles=0):
+                      nprocs=1, sim_time=0.0):
     r"""Load a uniform grid of data into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamHandler`.
 
@@ -326,6 +413,9 @@
           disappointing or non-existent in most cases.
         * Particles may be difficult to integrate.
 
+    Particle fields are detected as one-dimensional fields. The number of particles
+    is set by the "number_of_particles" key in data.
+    
     Parameters
     ----------
     data : dict
@@ -340,8 +430,6 @@
         If greater than 1, will create this number of subarrays out of data
     sim_time : float, optional
         The simulation time in seconds
-    number_of_particles : int, optional
-        If particle fields are included, set this to the number of particles
 
     Examples
     --------
@@ -361,14 +449,29 @@
     grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
 
     sfh = StreamDictFieldHandler()
-
+    
+    if data.has_key("number_of_particles") :
+        number_of_particles = data.pop("number_of_particles")
+    else :
+        number_of_particles = int(0)
+    
+    if number_of_particles > 0 :
+        particle_types = set_particle_types(data)
+        pdata = {}
+        pdata["number_of_particles"] = number_of_particles
+        for key in data.keys() :
+            if len(data[key].shape) == 1 :
+                pdata[key] = data.pop(key)
+    else :
+        particle_types = {}
+    
     if nprocs > 1:
         temp = {}
         new_data = {}
         for key in data.keys():
             psize = get_psize(np.array(data[key].shape), nprocs)
             grid_left_edges, grid_right_edges, temp[key] = \
-                decompose_array(data[key], psize, bbox)
+                             decompose_array(data[key], psize, bbox)
             grid_dimensions = np.array([grid.shape for grid in temp[key]],
                                        dtype="int32")
         for gid in range(nprocs):
@@ -389,9 +492,10 @@
         grid_dimensions,
         grid_levels,
         -np.ones(nprocs, dtype='int64'),
-        number_of_particles*np.ones(nprocs, dtype='int64').reshape(nprocs,1),
+        np.zeros(nprocs, dtype='int64').reshape(nprocs,1), # Temporary
         np.zeros(nprocs).reshape((nprocs,1)),
         sfh,
+        particle_types=particle_types
     )
 
     handler.name = "UniformGridData"
@@ -410,10 +514,16 @@
     box_in_mpc = sim_unit_to_cm / mpc_conversion['cm']
     for unit in mpc_conversion.keys():
         spf.units[unit] = mpc_conversion[unit] * box_in_mpc
+
+    # Now figure out where the particles go
+
+    if number_of_particles > 0 :
+        assign_particle_data(spf, pdata)
+    
     return spf
 
 def load_amr_grids(grid_data, domain_dimensions, sim_unit_to_cm, bbox=None,
-                   sim_time=0.0, number_of_particles=0):
+                   sim_time=0.0):
     r"""Load a set of grids of data into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamHandler`.
 
@@ -432,8 +542,9 @@
     grid_data : list of dicts
         This is a list of dicts.  Each dict must have entries "left_edge",
         "right_edge", "dimensions", "level", and then any remaining entries are
-        assumed to be fields.  This will be modified in place and can't be
-        assumed to be static..
+        assumed to be fields.  They also may include a particle count, otherwise
+        assumed to be zero. This will be modified in place and can't be
+        assumed to be static.
     domain_dimensions : array_like
         This is the domain dimensions of the grid
     sim_unit_to_cm : float
@@ -442,8 +553,6 @@
         Size of computational domain in units sim_unit_to_cm
     sim_time : float, optional
         The simulation time in seconds
-    number_of_particles : int, optional
-        If particle fields are included, set this to the number of particles
 
     Examples
     --------
@@ -452,11 +561,13 @@
     ...     dict(left_edge = [0.0, 0.0, 0.0],
     ...          right_edge = [1.0, 1.0, 1.],
     ...          level = 0,
-    ...          dimensions = [32, 32, 32]),
+    ...          dimensions = [32, 32, 32],
+    ...          number_of_particles = 0)
     ...     dict(left_edge = [0.25, 0.25, 0.25],
     ...          right_edge = [0.75, 0.75, 0.75],
     ...          level = 1,
-    ...          dimensions = [32, 32, 32])
+    ...          dimensions = [32, 32, 32],
+    ...          number_of_particles = 0)
     ... ]
     ... 
     >>> for g in grid_data:
@@ -475,23 +586,27 @@
     grid_left_edges = np.zeros((ngrids, 3), dtype="float32")
     grid_right_edges = np.zeros((ngrids, 3), dtype="float32")
     grid_dimensions = np.zeros((ngrids, 3), dtype="int32")
+    number_of_particles = np.zeros((ngrids,1), dtype='int64')
     sfh = StreamDictFieldHandler()
     for i, g in enumerate(grid_data):
         grid_left_edges[i,:] = g.pop("left_edge")
         grid_right_edges[i,:] = g.pop("right_edge")
         grid_dimensions[i,:] = g.pop("dimensions")
         grid_levels[i,:] = g.pop("level")
+        if g.has_key("number_of_particles") :
+            number_of_particles[i,:] = g.pop("number_of_particles")  
         sfh[i] = g
-
+            
     handler = StreamHandler(
         grid_left_edges,
         grid_right_edges,
         grid_dimensions,
         grid_levels,
         None, # parent_ids is none
-        number_of_particles*np.ones(ngrids, dtype='int64').reshape(ngrids,1),
+        number_of_particles,
         np.zeros(ngrids).reshape((ngrids,1)),
         sfh,
+        particle_types=set_particle_types(grid_data[0])
     )
 
     handler.name = "AMRGridData"
@@ -543,6 +658,20 @@
     >>> ug = load_uniform_grid({'Density': data}, domain_dims, 1.0)
     >>> pf = refine_amr(ug, rc, fo, 5)
     """
+
+    # If we have particle data, set it aside for now
+
+    number_of_particles = np.sum([grid.NumberOfParticles
+                                  for grid in base_pf.h.grids])
+
+    if number_of_particles > 0 :
+        pdata = {}
+        for field in base_pf.h.field_list :
+            if base_pf.field_info[field].particle_type :
+                pdata[field] = np.concatenate([grid[field]
+                                               for grid in base_pf.h.grids])
+        pdata["number_of_particles"] = number_of_particles
+        
     last_gc = base_pf.h.num_grids
     cur_gc = -1
     pf = base_pf    
@@ -559,7 +688,8 @@
                        level = g.Level,
                        dimensions = g.ActiveDimensions )
             for field in pf.h.field_list:
-                gd[field] = g[field]
+                if not pf.field_info[field].particle_type :
+                    gd[field] = g[field]
             grid_data.append(gd)
             if g.Level < pf.h.max_level: continue
             fg = FlaggingGrid(g, refinement_criteria)
@@ -571,8 +701,14 @@
                 gd = dict(left_edge = LE, right_edge = grid.right_edge,
                           level = g.Level + 1, dimensions = dims)
                 for field in pf.h.field_list:
-                    gd[field] = grid[field]
+                    if not pf.field_info[field].particle_type :
+                        gd[field] = grid[field]
                 grid_data.append(gd)
         pf = load_amr_grids(grid_data, pf.domain_dimensions, 1.0)
         cur_gc = pf.h.num_grids
+
+    # Now reassign particle data to grids
+
+    if number_of_particles > 0 : assign_particle_data(pf, pdata)
+    
     return pf
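
The core bookkeeping in assign_particle_data is worth seeing in isolation: particles are sorted by the grid that contains them, and a cumulative count turns per-grid totals into [start, end) slices into the sorted order. A runnable sketch with made-up grid assignments:

    import numpy as np

    num_grids = 3
    particle_grid_inds = np.array([2, 0, 1, 0, 2, 2])  # grid holding each particle

    idxs = np.argsort(particle_grid_inds)              # particle order, grouped by grid
    counts = np.bincount(particle_grid_inds, minlength=num_grids)
    indices = np.zeros(num_grids + 1, dtype='int64')
    np.add.accumulate(counts, out=indices[1:])         # cumulative offsets per grid

    for i in range(num_grids):
        start, end = indices[i], indices[i + 1]
        # idxs[start:end] are the indices of the particles assigned to grid i
        print("grid %d: %s" % (i, idxs[start:end]))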


diff -r 787cf887d824ca8694bc52ee4448e8e6d38b022d -r 592cf5fd7b89992a7641363399240fc09be3e406 yt/frontends/stream/tests/test_stream_particles.py
--- /dev/null
+++ b/yt/frontends/stream/tests/test_stream_particles.py
@@ -0,0 +1,130 @@
+import numpy as np
+from yt.testing import *
+from yt.frontends.stream.api import load_uniform_grid, refine_amr, load_amr_grids
+import yt.utilities.initial_conditions as ic
+import yt.utilities.flagging_methods as fm
+
+def setup() :
+    pass
+
+# Field information
+
+def test_stream_particles() :
+    
+    num_particles = 100000
+    domain_dims = (64, 64, 64)
+    dens = np.random.random(domain_dims) 
+    x = np.random.uniform(size=num_particles)
+    y = np.random.uniform(size=num_particles)
+    z = np.random.uniform(size=num_particles)
+    m = np.ones((num_particles))
+
+    # Field operators and cell flagging methods
+
+    fo = []
+    fo.append(ic.TopHatSphere(0.1, [0.2,0.3,0.4],{"Density": 2.0}))
+    fo.append(ic.TopHatSphere(0.05, [0.7,0.4,0.75],{"Density": 20.0}))
+    rc = [fm.flagging_method_registry["overdensity"](1.0)]
+    
+    # Check that all of this runs ok without particles
+    
+    ug0 = load_uniform_grid({"Density": dens}, domain_dims, 1.0)
+    ug0 = load_uniform_grid({"Density": dens}, domain_dims, 1.0, nprocs=8)
+    amr0 = refine_amr(ug0, rc, fo, 3)
+
+    grid_data = []
+    
+    for grid in amr0.h.grids :
+        
+        data = dict(left_edge = grid.LeftEdge,
+                    right_edge = grid.RightEdge,
+                    level = grid.Level,
+                    dimensions = grid.ActiveDimensions,
+                    number_of_particles = grid.NumberOfParticles)
+    
+        for field in amr0.h.field_list :
+            
+            data[field] = grid[field]
+            
+        grid_data.append(data)
+
+    amr0 = load_amr_grids(grid_data, domain_dims, 1.0)
+                        
+    # Now add particles
+
+    fields1 = {"Density": dens,
+               "particle_position_x": x,
+               "particle_position_y": y,
+               "particle_position_z": z,
+               "particle_mass": m,
+               "number_of_particles": num_particles}
+
+    fields2 = fields1.copy()
+
+    ug1 = load_uniform_grid(fields1, domain_dims, 1.0)
+    ug2 = load_uniform_grid(fields2, domain_dims, 1.0, nprocs=8)
+
+    # Check to make sure the number of particles is the same
+
+    number_of_particles1 = np.sum([grid.NumberOfParticles for grid in ug1.h.grids])
+    number_of_particles2 = np.sum([grid.NumberOfParticles for grid in ug2.h.grids])
+    
+    assert number_of_particles1 == num_particles
+    assert number_of_particles1 == number_of_particles2
+
+    # Check to make sure the fields have been defined correctly
+    
+    assert ug1.field_info["particle_position_x"].particle_type
+    assert ug1.field_info["particle_position_y"].particle_type
+    assert ug1.field_info["particle_position_z"].particle_type
+    assert ug1.field_info["particle_mass"].particle_type
+    assert not ug1.field_info["Density"].particle_type
+
+    assert ug2.field_info["particle_position_x"].particle_type
+    assert ug2.field_info["particle_position_y"].particle_type
+    assert ug2.field_info["particle_position_z"].particle_type
+    assert ug2.field_info["particle_mass"].particle_type
+    assert not ug2.field_info["Density"].particle_type
+    
+    # Now refine this
+
+    amr1 = refine_amr(ug1, rc, fo, 3)
+    
+    grid_data = []
+    
+    for grid in amr1.h.grids :
+        
+        data = dict(left_edge = grid.LeftEdge,
+                    right_edge = grid.RightEdge,
+                    level = grid.Level,
+                    dimensions = grid.ActiveDimensions,
+                    number_of_particles = grid.NumberOfParticles)
+
+        for field in amr1.h.field_list :
+
+            data[field] = grid[field]
+            
+        grid_data.append(data)
+    
+    amr2 = load_amr_grids(grid_data, domain_dims, 1.0)
+
+    # Check everything again
+
+    number_of_particles1 = [grid.NumberOfParticles for grid in amr1.h.grids]
+    number_of_particles2 = [grid.NumberOfParticles for grid in amr2.h.grids]
+    
+    assert np.sum(number_of_particles1) == num_particles
+    assert_equal(number_of_particles1, number_of_particles2)
+    
+    assert amr1.field_info["particle_position_x"].particle_type
+    assert amr1.field_info["particle_position_y"].particle_type
+    assert amr1.field_info["particle_position_z"].particle_type
+    assert amr1.field_info["particle_mass"].particle_type
+    assert not amr1.field_info["Density"].particle_type
+    
+    assert amr2.field_info["particle_position_x"].particle_type
+    assert amr2.field_info["particle_position_y"].particle_type
+    assert amr2.field_info["particle_position_z"].particle_type
+    assert amr2.field_info["particle_mass"].particle_type
+    assert not amr2.field_info["Density"].particle_type
+
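
In short, the convention these tests exercise: pass particle fields as 1D arrays alongside the grid fields and include a "number_of_particles" key; load_uniform_grid then strips the particle fields out and assigns them to grids by position. A usage sketch, which requires a yt build with these patches applied:

    import numpy as np
    from yt.frontends.stream.api import load_uniform_grid

    n = 1000
    data = {"Density": np.random.random((32, 32, 32)),
            "particle_position_x": np.random.uniform(size=n),
            "particle_position_y": np.random.uniform(size=n),
            "particle_position_z": np.random.uniform(size=n),
            "number_of_particles": n}
    pf = load_uniform_grid(data, (32, 32, 32), 1.0, nprocs=4)
    # Each 1D field is flagged particle_type=True and distributed to the grids.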


diff -r 787cf887d824ca8694bc52ee4448e8e6d38b022d -r 592cf5fd7b89992a7641363399240fc09be3e406 yt/frontends/stream/tests/test_update_data.py
--- /dev/null
+++ b/yt/frontends/stream/tests/test_update_data.py
@@ -0,0 +1,23 @@
+from yt.testing import *
+from yt.data_objects.profiles import BinnedProfile1D
+from numpy.random import uniform
+
+def setup():
+    global pf
+    pf = fake_random_pf(64, nprocs=8)
+    pf.h
+    
+def test_update_data() :
+    dims = (32,32,32)
+    grid_data = [{"Temperature":uniform(size=dims)}
+                 for i in xrange(pf.h.num_grids)]
+    pf.h.update_data(grid_data)
+    prj = pf.h.proj(2, "Temperature")
+    prj["Temperature"]
+    dd = pf.h.all_data()
+    profile = BinnedProfile1D(dd, 10, "Density",
+                              dd["Density"].min(),
+                              dd["Density"].max())
+    profile.add_fields(["Temperature"])
+    profile["Temperature"]
+                              



https://bitbucket.org/yt_analysis/yt/changeset/25c5dc0982b4/
changeset:   25c5dc0982b4
branch:      yt
user:        jzuhone
date:        2012-11-28 20:12:35
summary:     Particle generator classes
affected #:  1 file

diff -r 592cf5fd7b89992a7641363399240fc09be3e406 -r 25c5dc0982b4e4c7661c09cacca6c6b59933d0ba yt/utilities/particle_generator.py
--- /dev/null
+++ b/yt/utilities/particle_generator.py
@@ -0,0 +1,307 @@
+import numpy as np
+import h5py
+from yt.utilities.lib import CICSample_3
+from yt.funcs import *
+
+class ParticleGenerator(object) :
+
+    default_fields = ["particle_position_x",
+                      "particle_position_y",
+                      "particle_position_z",
+                      "particle_index"]
+
+    def __init__(self, pf, num_particles, field_list) :
+        
+        self.pf = pf
+        self.num_particles = num_particles
+        self.field_list = field_list
+            
+        try :
+            self.posx_index = self.field_list.index(self.default_fields[0])
+            self.posy_index = self.field_list.index(self.default_fields[1])
+            self.posz_index = self.field_list.index(self.default_fields[2])
+            self.index_index = self.field_list.index(self.default_fields[3])
+        except :
+            raise KeyError("Field list must contain the following fields: " +
+                           "\'particle_position_x\', \'particle_position_y\'" +
+                           ", \'particle_position_z\', \'particle_index\' ")
+
+        self.num_grids = self.pf.h.num_grids
+        self.NumberOfParticles = np.zeros((self.num_grids), dtype='int64')
+        self.ParticleIndices = np.zeros(self.num_grids + 1, dtype='int64')
+        
+        self.num_fields = len(self.field_list)
+        
+        self.particles = np.zeros((self.num_particles, self.num_fields),
+                                  dtype='float64')
+
+    def has_key(self, key) :
+
+        return (key in self.field_list)
+            
+    def keys(self) :
+
+        return self.field_list.keys()
+    
+    def __getitem__(self, key) :
+
+        """
+        Get the field associated with key.
+        """
+
+        return self.particles[:,self.field_list.index(key)]
+    
+    def __setitem__(self, key, val) :
+        
+        """
+        Sets a field to be some other value.
+        """
+
+        self.particles[:,self.field_list.index(key)] = val[:]
+                
+    def __len__(self) :
+
+        """
+        The number of particles
+        """
+        
+        return self.num_particles
+
+    def get_from_grid(self, grid) :
+
+        ind = grid.id-grid._id_offset
+        
+        start = self.ParticleIndices[ind]
+        end = self.ParticleIndices[ind+1]
+
+        return dict([(field, self.particles[start:end,self.field_list.index(field)])
+                     for field in self.field_list])
+    
+    def setup_particles(self,x,y,z) :
+
+        particle_grids, particle_grid_inds = self.pf.h.find_points(x,y,z)
+
+        idxs = np.argsort(particle_grid_inds)
+
+        self.particles[:,self.posx_index] = x[idxs]
+        self.particles[:,self.posy_index] = y[idxs]
+        self.particles[:,self.posz_index] = z[idxs]
+
+        self.NumberOfParticles = np.bincount(particle_grid_inds,
+                                             minlength=self.num_grids)
+
+        if self.num_grids > 1 :
+            np.add.accumulate(self.NumberOfParticles.squeeze(),
+                              out=self.ParticleIndices[1:])
+        else :
+            self.ParticleIndices[1] = self.NumberOfParticles.squeeze()
+
+        return idxs
+    
+    def assign_indices(self, function=None, **kwargs) :
+
+        if function is None :
+            self.particles[:,self.index_index] = np.arange((self.num_particles))
+        else :
+            self.particles[:,self.index_index] = function(**kwargs)
+            
+    def map_grid_fields_to_particles(self, mapping_dict) :
+        
+        pbar = get_pbar("Mapping fields to particles", self.num_grids)
+        
+        for i, grid in enumerate(self.pf.h.grids) :
+
+            pbar.update(i)
+            
+            if self.NumberOfParticles[i] >  0:
+
+                start = self.ParticleIndices[i]
+                end = self.ParticleIndices[i+1]
+
+                # Note we add one ghost zone!
+                
+                cube = grid.retrieve_ghost_zones(1, mapping_dict.keys())
+
+                le = np.array(grid.LeftEdge).astype(np.float64)
+                dims = np.array(grid.ActiveDimensions).astype(np.int32)
+                
+                for gfield, pfield in mapping_dict.items() :
+
+                    field_index = self.field_list.index(pfield)
+                    
+                    CICSample_3(self.particles[start:end,self.posx_index],
+                                self.particles[start:end,self.posy_index],
+                                self.particles[start:end,self.posz_index],
+                                self.particles[start:end,field_index],
+                                np.int64(self.NumberOfParticles[i]),
+                                cube[gfield], le, dims,
+                                np.float64(grid['dx']))
+
+        pbar.finish()
+
+    def apply_to_stream(self) :
+
+        grid_data = []
+        
+        for i,g in enumerate(self.pf.h.grids) :
+
+            data = {}
+                        
+            data["number_of_particles"] = self.NumberOfParticles[i] + \
+                                          g.NumberOfParticles
+
+            grid_particles = self.get_from_grid(g)
+            
+            for field in self.field_list :
+
+                if data["number_of_particles"] > 0 :
+                
+                    if g.NumberOfParticles > 0 :
+                        data[field] = np.concatenate(g[field],
+                                                     grid_particles[field])
+                    else :
+                        data[field] = grid_particles[field]
+                        
+                else :
+
+                    data[field] = np.array([], dtype='float64')
+
+            grid_data.append(data)
+            
+        self.pf.h.update_data(grid_data)
+
+class FromListParticleGenerator(ParticleGenerator) :
+
+    def __init__(self, pf, num_particles, data) :
+
+        field_list = data.keys()
+
+        x = data["particle_position_x"]
+        y = data["particle_position_y"]
+        z = data["particle_position_z"]
+
+        xcond = np.logical_or(x < pf.domain_left_edge[0],
+                              x >= pf.domain_right_edge[0])
+        ycond = np.logical_or(y < pf.domain_left_edge[1],
+                              y >= pf.domain_right_edge[1])
+        zcond = np.logical_or(z < pf.domain_left_edge[2],
+                              z >= pf.domain_right_edge[2])
+
+        cond = np.logical_or(xcond, ycond)
+        cond = np.logical_or(zcond, cond)
+
+        if np.any(cond) :
+            raise ValueError("Some particles are outside of the domain!!!")
+            
+        ParticleGenerator.__init__(self, pf, num_particles, field_list)
+
+        idxs = self.setup_particles(x,y,z)
+        
+        for field in field_list :
+            
+            self.particles[:,self.field_list.index(field)] = data[field][idxs]
+
+class LatticeParticleGenerator(ParticleGenerator) :
+
+    def __init__(self, pf, particles_dims, particles_left_edge,
+                 particles_right_edge, field_list) :
+
+        num_x = particles_dims[0]
+        num_y = particles_dims[1]
+        num_z = particles_dims[2]
+        
+        xmin = particles_left_edge[0]
+        ymin = particles_left_edge[1]
+        zmin = particles_left_edge[2]
+        
+        xmax = particles_right_edge[0]
+        ymax = particles_right_edge[1]
+        zmax = particles_right_edge[2]
+                                
+        xcond = (xmin < pf.domain_left_edge[0]) or \
+                (xmax >= pf.domain_right_edge[0])
+        ycond = (ymin < pf.domain_left_edge[1]) or \
+                (ymax >= pf.domain_right_edge[1])
+        zcond = (zmin < pf.domain_left_edge[2]) or \
+                (zmax >= pf.domain_right_edge[2])
+                
+        cond = xcond or ycond or zcond
+
+        if cond :
+            raise ValueError("Proposed bounds for particles are outside domain!!!")
+        
+        ParticleGenerator.__init__(self, pf, num_x*num_y*num_z, field_list)
+
+        dx = (xmax-xmin)/(num_x-1)
+        dy = (ymax-ymin)/(num_y-1)
+        dz = (zmax-zmin)/(num_z-1)
+        
+        inds = np.indices((num_x,num_y,num_z))
+        xpos = inds[0]*dx + xmin
+        ypos = inds[1]*dy + ymin
+        zpos = inds[2]*dz + zmin
+        
+        self.setup_particles(xpos.flat, ypos.flat, zpos.flat)
+        
+class WithDensityParticleGenerator(ParticleGenerator) :
+
+    def __init__(self, pf, data_source, num_particles, field_list,
+                 density_field="Density") :
+
+        ParticleGenerator.__init__(self, pf, num_particles, field_list)
+
+        num_cells = len(data_source["x"].flat)
+        
+        max_density = data_source[density_field].max()
+
+        num_particles_left = num_particles
+
+        all_x = []
+        all_y = []
+        all_z = []
+        
+        pbar = get_pbar("Generating Particles", num_particles)
+
+        tot_num_accepted = int(0)
+        
+        while num_particles_left > 0:
+
+            rho = np.random.uniform(high=1.01*max_density,
+                                    size=num_particles_left)
+
+            idxs = np.random.random_integers(low=0, high=num_cells-1,
+                                             size=num_particles_left)
+
+            rho_true = data_source[density_field].flat[idxs]
+            accept = rho <= rho_true
+            num_accepted = accept.sum()
+
+            accepted_idxs = idxs[accept]
+            
+            xpos = data_source["x"].flat[accepted_idxs] + \
+                   np.random.uniform(low=-0.5, high=0.5, size=num_accepted) * \
+                   data_source["dx"].flat[accepted_idxs]
+            ypos = data_source["y"].flat[accepted_idxs] + \
+                   np.random.uniform(low=-0.5, high=0.5, size=num_accepted) * \
+                   data_source["dy"].flat[accepted_idxs]
+            zpos = data_source["z"].flat[accepted_idxs] + \
+                   np.random.uniform(low=-0.5, high=0.5, size=num_accepted) * \
+                   data_source["dz"].flat[accepted_idxs]
+
+            all_x.append(xpos)
+            all_y.append(ypos)
+            all_z.append(zpos)
+
+            num_particles_left -= num_accepted
+            tot_num_accepted += num_accepted
+            
+            pbar.update(tot_num_accepted)
+
+        pbar.finish()
+
+        x = np.concatenate(all_x)
+        y = np.concatenate(all_y)
+        z = np.concatenate(all_z)
+
+        self.setup_particles(x,y,z)
+        
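
WithDensityParticleGenerator places particles by acceptance-rejection sampling: candidate cells are drawn uniformly and kept with probability proportional to their density, looping until enough particles are accepted. Below is a self-contained sketch of that technique with a synthetic density array (not the yt class itself; it uses np.random.randint in place of the patch's random_integers call):

    import numpy as np

    rho = np.random.random(4096)      # stand-in for data_source["Density"].flat
    rho_max = rho.max()
    num_left = 500
    accepted = []
    while num_left > 0:
        u = np.random.uniform(high=1.01 * rho_max, size=num_left)
        cand = np.random.randint(0, rho.size, size=num_left)
        keep = u <= rho[cand]         # accept with probability rho / (1.01 * rho_max)
        accepted.append(cand[keep])
        num_left -= keep.sum()
    cells = np.concatenate(accepted)  # cell indices distributed proportional to density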



https://bitbucket.org/yt_analysis/yt/changeset/6e157ece026c/
changeset:   6e157ece026c
branch:      yt
user:        jzuhone
date:        2012-11-30 06:54:42
summary:     Removed whitespace and added docstrings. Removed particle generator for now.
affected #:  2 files

diff -r 25c5dc0982b4e4c7661c09cacca6c6b59933d0ba -r 6e157ece026c17e28070a49625b786066c669988 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -259,27 +259,26 @@
             self.io = io_registry[self.data_style](self.stream_handler)
 
     def update_data(self, data) :
+
+        """
+        Update the stream data with a new data dict. If fields already exist,
+        they will be replaced, but if they do not, they will be added. Fields
+        already in the stream but not part of the data dict will be left
+        alone. 
+        """
         
         particle_types = set_particle_types(data[0])
 
         for key in data[0].keys() :
-
             if key is "number_of_particles": continue
-            
             self.stream_handler.particle_types[key] = particle_types[key]
-            
             if key not in self.field_list:
                 self.field_list.append(key)
 
-        
         for i, grid in enumerate(self.grids) :
-            
             if data[i].has_key("number_of_particles") :
-
                 grid.NumberOfParticles = data[i].pop("number_of_particles")
-                
             for key in data[i].keys() :
-
                 self.stream_handler.fields[grid.id][key] = data[i][key]
             
         self._setup_unknown_fields()
@@ -351,6 +350,11 @@
     return particle_types
 
 def assign_particle_data(pf, pdata) :
+
+    """
+    Assign particle data to the grids using find_points. This
+    will overwrite any existing particle data, so be careful!
+    """
     
     if pf.h.num_grids > 1 :
 
@@ -362,36 +366,26 @@
             raise KeyError("Cannot decompose particle data without position fields!")
         
         particle_grids, particle_grid_inds = pf.h.find_points(x,y,z)
-        
         idxs = np.argsort(particle_grid_inds)
-        
         particle_grid_count = np.bincount(particle_grid_inds,
                                           minlength=pf.h.num_grids)
         particle_indices = np.zeros(pf.h.num_grids + 1, dtype='int64')
-        
         if pf.h.num_grids > 1 :
             np.add.accumulate(particle_grid_count.squeeze(),
                               out=particle_indices[1:])
         else :
             particle_indices[1] = particle_grid_count.squeeze()
-            
-        pdata.pop("number_of_particles")
-        
+    
+        pdata.pop("number_of_particles")    
         grid_pdata = []
         
         for i, pcount in enumerate(particle_grid_count) :
-            
             grid = {}
-            
             grid["number_of_particles"] = pcount
-            
             start = particle_indices[i]
             end = particle_indices[i+1]
-            
             for key in pdata.keys() :
-                
                 grid[key] = pdata[key][idxs][start:end]
-                
             grid_pdata.append(grid)
 
     else :


diff -r 25c5dc0982b4e4c7661c09cacca6c6b59933d0ba -r 6e157ece026c17e28070a49625b786066c669988 yt/utilities/particle_generator.py
--- a/yt/utilities/particle_generator.py
+++ /dev/null
@@ -1,307 +0,0 @@
-import numpy as np
-import h5py
-from yt.utilities.lib import CICSample_3
-from yt.funcs import *
-
-class ParticleGenerator(object) :
-
-    default_fields = ["particle_position_x",
-                      "particle_position_y",
-                      "particle_position_z",
-                      "particle_index"]
-
-    def __init__(self, pf, num_particles, field_list) :
-        
-        self.pf = pf
-        self.num_particles = num_particles
-        self.field_list = field_list
-            
-        try :
-            self.posx_index = self.field_list.index(self.default_fields[0])
-            self.posy_index = self.field_list.index(self.default_fields[1])
-            self.posz_index = self.field_list.index(self.default_fields[2])
-            self.index_index = self.field_list.index(self.default_fields[3])
-        except :
-            raise KeyError("Field list must contain the following fields: " +
-                           "\'particle_position_x\', \'particle_position_y\'" +
-                           ", \'particle_position_z\', \'particle_index\' ")
-
-        self.num_grids = self.pf.h.num_grids
-        self.NumberOfParticles = np.zeros((self.num_grids), dtype='int64')
-        self.ParticleIndices = np.zeros(self.num_grids + 1, dtype='int64')
-        
-        self.num_fields = len(self.field_list)
-        
-        self.particles = np.zeros((self.num_particles, self.num_fields),
-                                  dtype='float64')
-
-    def has_key(self, key) :
-
-        return (key in self.field_list)
-            
-    def keys(self) :
-
-        return self.field_list.keys()
-    
-    def __getitem__(self, key) :
-
-        """
-        Get the field associated with key.
-        """
-
-        return self.particles[:,self.field_list.index(key)]
-    
-    def __setitem__(self, key, val) :
-        
-        """
-        Sets a field to be some other value.
-        """
-
-        self.particles[:,self.field_list.index(key)] = val[:]
-                
-    def __len__(self) :
-
-        """
-        The number of particles
-        """
-        
-        return self.num_particles
-
-    def get_from_grid(self, grid) :
-
-        ind = grid.id-grid._id_offset
-        
-        start = self.ParticleIndices[ind]
-        end = self.ParticleIndices[ind+1]
-
-        return dict([(field, self.particles[start:end,self.field_list.index(field)])
-                     for field in self.field_list])
-    
-    def setup_particles(self,x,y,z) :
-
-        particle_grids, particle_grid_inds = self.pf.h.find_points(x,y,z)
-
-        idxs = np.argsort(particle_grid_inds)
-
-        self.particles[:,self.posx_index] = x[idxs]
-        self.particles[:,self.posy_index] = y[idxs]
-        self.particles[:,self.posz_index] = z[idxs]
-
-        self.NumberOfParticles = np.bincount(particle_grid_inds,
-                                             minlength=self.num_grids)
-
-        if self.num_grids > 1 :
-            np.add.accumulate(self.NumberOfParticles.squeeze(),
-                              out=self.ParticleIndices[1:])
-        else :
-            self.ParticleIndices[1] = self.NumberOfParticles.squeeze()
-
-        return idxs
-    
-    def assign_indices(self, function=None, **kwargs) :
-
-        if function is None :
-            self.particles[:,self.index_index] = np.arange((self.num_particles))
-        else :
-            self.particles[:,self.index_index] = function(**kwargs)
-            
-    def map_grid_fields_to_particles(self, mapping_dict) :
-        
-        pbar = get_pbar("Mapping fields to particles", self.num_grids)
-        
-        for i, grid in enumerate(self.pf.h.grids) :
-
-            pbar.update(i)
-            
-            if self.NumberOfParticles[i] >  0:
-
-                start = self.ParticleIndices[i]
-                end = self.ParticleIndices[i+1]
-
-                # Note we add one ghost zone!
-                
-                cube = grid.retrieve_ghost_zones(1, mapping_dict.keys())
-
-                le = np.array(grid.LeftEdge).astype(np.float64)
-                dims = np.array(grid.ActiveDimensions).astype(np.int32)
-                
-                for gfield, pfield in mapping_dict.items() :
-
-                    field_index = self.field_list.index(pfield)
-                    
-                    CICSample_3(self.particles[start:end,self.posx_index],
-                                self.particles[start:end,self.posy_index],
-                                self.particles[start:end,self.posz_index],
-                                self.particles[start:end,field_index],
-                                np.int64(self.NumberOfParticles[i]),
-                                cube[gfield], le, dims,
-                                np.float64(grid['dx']))
-
-        pbar.finish()
-
-    def apply_to_stream(self) :
-
-        grid_data = []
-        
-        for i,g in enumerate(self.pf.h.grids) :
-
-            data = {}
-                        
-            data["number_of_particles"] = self.NumberOfParticles[i] + \
-                                          g.NumberOfParticles
-
-            grid_particles = self.get_from_grid(g)
-            
-            for field in self.field_list :
-
-                if data["number_of_particles"] > 0 :
-                
-                    if g.NumberOfParticles > 0 :
-                        data[field] = np.concatenate(g[field],
-                                                     grid_particles[field])
-                    else :
-                        data[field] = grid_particles[field]
-                        
-                else :
-
-                    data[field] = np.array([], dtype='float64')
-
-            grid_data.append(data)
-            
-        self.pf.h.update_data(grid_data)
-
-class FromListParticleGenerator(ParticleGenerator) :
-
-    def __init__(self, pf, num_particles, data) :
-
-        field_list = data.keys()
-
-        x = data["particle_position_x"]
-        y = data["particle_position_y"]
-        z = data["particle_position_z"]
-
-        xcond = np.logical_or(x < pf.domain_left_edge[0],
-                              x >= pf.domain_right_edge[0])
-        ycond = np.logical_or(y < pf.domain_left_edge[1],
-                              y >= pf.domain_right_edge[1])
-        zcond = np.logical_or(z < pf.domain_left_edge[2],
-                              z >= pf.domain_right_edge[2])
-
-        cond = np.logical_or(xcond, ycond)
-        cond = np.logical_or(zcond, cond)
-
-        if np.any(cond) :
-            raise ValueError("Some particles are outside of the domain!!!")
-            
-        ParticleGenerator.__init__(self, pf, num_particles, field_list)
-
-        idxs = self.setup_particles(x,y,z)
-        
-        for field in field_list :
-            
-            self.particles[:,self.field_list.index(field)] = data[field][idxs]
-
-class LatticeParticleGenerator(ParticleGenerator) :
-
-    def __init__(self, pf, particles_dims, particles_left_edge,
-                 particles_right_edge, field_list) :
-
-        num_x = particles_dims[0]
-        num_y = particles_dims[1]
-        num_z = particles_dims[2]
-        
-        xmin = particles_left_edge[0]
-        ymin = particles_left_edge[1]
-        zmin = particles_left_edge[2]
-        
-        xmax = particles_right_edge[0]
-        ymax = particles_right_edge[1]
-        zmax = particles_right_edge[2]
-                                
-        xcond = (xmin < pf.domain_left_edge[0]) or \
-                (xmax >= pf.domain_right_edge[0])
-        ycond = (ymin < pf.domain_left_edge[1]) or \
-                (ymax >= pf.domain_right_edge[1])
-        zcond = (zmin < pf.domain_left_edge[2]) or \
-                (zmax >= pf.domain_right_edge[2])
-                
-        cond = xcond or ycond or zcond
-
-        if cond :
-            raise ValueError("Proposed bounds for particles are outside domain!!!")
-        
-        ParticleGenerator.__init__(self, pf, num_x*num_y*num_z, field_list)
-
-        dx = (xmax-xmin)/(num_x-1)
-        dy = (ymax-ymin)/(num_y-1)
-        dz = (zmax-zmin)/(num_z-1)
-        
-        inds = np.indices((num_x,num_y,num_z))
-        xpos = inds[0]*dx + xmin
-        ypos = inds[1]*dy + ymin
-        zpos = inds[2]*dz + zmin
-        
-        self.setup_particles(xpos.flat, ypos.flat, zpos.flat)
-        
-class WithDensityParticleGenerator(ParticleGenerator) :
-
-    def __init__(self, pf, data_source, num_particles, field_list,
-                 density_field="Density") :
-
-        ParticleGenerator.__init__(self, pf, num_particles, field_list)
-
-        num_cells = len(data_source["x"].flat)
-        
-        max_density = data_source[density_field].max()
-
-        num_particles_left = num_particles
-
-        all_x = []
-        all_y = []
-        all_z = []
-        
-        pbar = get_pbar("Generating Particles", num_particles)
-
-        tot_num_accepted = int(0)
-        
-        while num_particles_left > 0:
-
-            rho = np.random.uniform(high=1.01*max_density,
-                                    size=num_particles_left)
-
-            idxs = np.random.random_integers(low=0, high=num_cells-1,
-                                             size=num_particles_left)
-
-            rho_true = data_source[density_field].flat[idxs]
-            accept = rho <= rho_true
-            num_accepted = accept.sum()
-
-            accepted_idxs = idxs[accept]
-            
-            xpos = data_source["x"].flat[accepted_idxs] + \
-                   np.random.uniform(low=-0.5, high=0.5, size=num_accepted) * \
-                   data_source["dx"].flat[accepted_idxs]
-            ypos = data_source["y"].flat[accepted_idxs] + \
-                   np.random.uniform(low=-0.5, high=0.5, size=num_accepted) * \
-                   data_source["dy"].flat[accepted_idxs]
-            zpos = data_source["z"].flat[accepted_idxs] + \
-                   np.random.uniform(low=-0.5, high=0.5, size=num_accepted) * \
-                   data_source["dz"].flat[accepted_idxs]
-
-            all_x.append(xpos)
-            all_y.append(ypos)
-            all_z.append(zpos)
-
-            num_particles_left -= num_accepted
-            tot_num_accepted += num_accepted
-            
-            pbar.update(tot_num_accepted)
-
-        pbar.finish()
-
-        x = np.concatenate(all_x)
-        y = np.concatenate(all_y)
-        z = np.concatenate(all_z)
-
-        self.setup_particles(x,y,z)
-        
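
The newly documented update_data semantics in practice: fields present in the dicts are added or replaced, and everything else is left alone. A usage sketch built on fake_random_pf from yt.testing, as in the tests above; requires a patched yt, and the "Entropy" field here is just an illustrative name:

    import numpy as np
    from yt.testing import fake_random_pf

    pf = fake_random_pf(16, nprocs=4)
    pf.h  # instantiate the hierarchy
    new_data = [{"Entropy": np.random.random(tuple(g.ActiveDimensions))}
                for g in pf.h.grids]
    pf.h.update_data(new_data)  # adds "Entropy"; the existing "Density" is untouched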



https://bitbucket.org/yt_analysis/yt/changeset/e053d21a0f8a/
changeset:   e053d21a0f8a
branch:      yt
user:        MatthewTurk
date:        2012-11-30 13:07:44
summary:     Merged in jzuhone/yt (pull request #354)
affected #:  4 files

diff -r e48ea3fd050760cc778fd19a73f2f2580bc8fb8c -r e053d21a0f8ab1cec9376f17bccecf491b674a08 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -105,7 +105,7 @@
 class StreamHandler(object):
     def __init__(self, left_edges, right_edges, dimensions,
                  levels, parent_ids, particle_count, processor_ids,
-                 fields, io = None):
+                 fields, io = None, particle_types = {}):
         self.left_edges = left_edges
         self.right_edges = right_edges
         self.dimensions = dimensions
@@ -116,10 +116,18 @@
         self.num_grids = self.levels.size
         self.fields = fields
         self.io = io
-
+        self.particle_types = particle_types
+            
     def get_fields(self):
         return self.fields.all_fields
 
+    def get_particle_type(self, field) :
+
+        if self.particle_types.has_key(field) :
+            return self.particle_types[field]
+        else :
+            return False
+        
 class StreamHierarchy(AMRHierarchy):
 
     grid = StreamGrid
@@ -150,11 +158,12 @@
                         return data.convert(f)
                     return _convert_function
                 cf = external_wrapper(field)
+            ptype = self.stream_handler.get_particle_type(field)
             # Note that we call add_field on the field_info directly.  This
             # will allow the same field detection mechanism to work for 1D, 2D
             # and 3D fields.
             self.pf.field_info.add_field(
-                    field, lambda a, b: None,
+                    field, lambda a, b: None, particle_type = ptype,
                     convert_function=cf, take_log=False)
 
     def _parse_hierarchy(self):
@@ -249,6 +258,32 @@
         else:
             self.io = io_registry[self.data_style](self.stream_handler)
 
+    def update_data(self, data) :
+
+        """
+        Update the stream data with a new data dict. If fields already exist,
+        they will be replaced, but if they do not, they will be added. Fields
+        already in the stream but not part of the data dict will be left
+        alone. 
+        """
+        
+        particle_types = set_particle_types(data[0])
+
+        for key in data[0].keys() :
+            if key is "number_of_particles": continue
+            self.stream_handler.particle_types[key] = particle_types[key]
+            if key not in self.field_list:
+                self.field_list.append(key)
+
+        for i, grid in enumerate(self.grids) :
+            if data[i].has_key("number_of_particles") :
+                grid.NumberOfParticles = data[i].pop("number_of_particles")
+            for key in data[i].keys() :
+                self.stream_handler.fields[grid.id][key] = data[i][key]
+            
+        self._setup_unknown_fields()
+        self._detect_fields()
+        
 class StreamStaticOutput(StaticOutput):
     _hierarchy_class = StreamHierarchy
     _fieldinfo_fallback = StreamFieldInfo
@@ -299,8 +334,68 @@
     @property
     def all_fields(self): return self[0].keys()
 
+def set_particle_types(data) :
+
+    particle_types = {}
+    
+    for key in data.keys() :
+
+        if key is "number_of_particles": continue
+        
+        if len(data[key].shape) == 1:
+            particle_types[key] = True
+        else :
+            particle_types[key] = False
+    
+    return particle_types
+
+def assign_particle_data(pf, pdata) :
+
+    """
+    Assign particle data to the grids using find_points. This
+    will overwrite any existing particle data, so be careful!
+    """
+    
+    if pf.h.num_grids > 1 :
+
+        try :
+            x = pdata["particle_position_x"]
+            y = pdata["particle_position_y"]
+            z = pdata["particle_position_z"]
+        except:
+            raise KeyError("Cannot decompose particle data without position fields!")
+        
+        particle_grids, particle_grid_inds = pf.h.find_points(x,y,z)
+        idxs = np.argsort(particle_grid_inds)
+        particle_grid_count = np.bincount(particle_grid_inds,
+                                          minlength=pf.h.num_grids)
+        particle_indices = np.zeros(pf.h.num_grids + 1, dtype='int64')
+        if pf.h.num_grids > 1 :
+            np.add.accumulate(particle_grid_count.squeeze(),
+                              out=particle_indices[1:])
+        else :
+            particle_indices[1] = particle_grid_count.squeeze()
+    
+        pdata.pop("number_of_particles")    
+        grid_pdata = []
+        
+        for i, pcount in enumerate(particle_grid_count) :
+            grid = {}
+            grid["number_of_particles"] = pcount
+            start = particle_indices[i]
+            end = particle_indices[i+1]
+            for key in pdata.keys() :
+                grid[key] = pdata[key][idxs][start:end]
+            grid_pdata.append(grid)
+
+    else :
+
+        grid_pdata = [pdata]
+        
+    pf.h.update_data(grid_pdata)
+                                        
 def load_uniform_grid(data, domain_dimensions, sim_unit_to_cm, bbox=None,
-                      nprocs=1, sim_time=0.0, number_of_particles=0):
+                      nprocs=1, sim_time=0.0):
     r"""Load a uniform grid of data into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamHandler`.
 
@@ -312,6 +407,9 @@
           disappointing or non-existent in most cases.
         * Particles may be difficult to integrate.
 
+    Particle fields are detected as one-dimensional fields. The number of particles
+    is set by the "number_of_particles" key in data.
+    
     Parameters
     ----------
     data : dict
@@ -326,8 +424,6 @@
         If greater than 1, will create this number of subarrays out of data
     sim_time : float, optional
         The simulation time in seconds
-    number_of_particles : int, optional
-        If particle fields are included, set this to the number of particles
 
     Examples
     --------
@@ -347,14 +443,29 @@
     grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
 
     sfh = StreamDictFieldHandler()
-
+    
+    if data.has_key("number_of_particles") :
+        number_of_particles = data.pop("number_of_particles")
+    else :
+        number_of_particles = int(0)
+    
+    if number_of_particles > 0 :
+        particle_types = set_particle_types(data)
+        pdata = {}
+        pdata["number_of_particles"] = number_of_particles
+        for key in data.keys() :
+            if len(data[key].shape) == 1 :
+                pdata[key] = data.pop(key)
+    else :
+        particle_types = {}
+    
     if nprocs > 1:
         temp = {}
         new_data = {}
         for key in data.keys():
             psize = get_psize(np.array(data[key].shape), nprocs)
             grid_left_edges, grid_right_edges, temp[key] = \
-                decompose_array(data[key], psize, bbox)
+                             decompose_array(data[key], psize, bbox)
             grid_dimensions = np.array([grid.shape for grid in temp[key]],
                                        dtype="int32")
         for gid in range(nprocs):
@@ -375,9 +486,10 @@
         grid_dimensions,
         grid_levels,
         -np.ones(nprocs, dtype='int64'),
-        number_of_particles*np.ones(nprocs, dtype='int64').reshape(nprocs,1),
+        np.zeros(nprocs, dtype='int64').reshape(nprocs,1), # Temporary
         np.zeros(nprocs).reshape((nprocs,1)),
         sfh,
+        particle_types=particle_types
     )
 
     handler.name = "UniformGridData"
@@ -396,10 +508,16 @@
     box_in_mpc = sim_unit_to_cm / mpc_conversion['cm']
     for unit in mpc_conversion.keys():
         spf.units[unit] = mpc_conversion[unit] * box_in_mpc
+
+    # Now figure out where the particles go
+
+    if number_of_particles > 0 :
+        assign_particle_data(spf, pdata)
+    
     return spf
 
 def load_amr_grids(grid_data, domain_dimensions, sim_unit_to_cm, bbox=None,
-                   sim_time=0.0, number_of_particles=0):
+                   sim_time=0.0):
     r"""Load a set of grids of data into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamHandler`.
 
@@ -418,8 +536,9 @@
     grid_data : list of dicts
         This is a list of dicts.  Each dict must have entries "left_edge",
         "right_edge", "dimensions", "level", and then any remaining entries are
-        assumed to be fields.  This will be modified in place and can't be
-        assumed to be static..
+        assumed to be fields.  They also may include a particle count, otherwise
+        assumed to be zero. This will be modified in place and can't be
+        assumed to be static.
     domain_dimensions : array_like
         This is the domain dimensions of the grid
     sim_unit_to_cm : float
@@ -428,8 +547,6 @@
         Size of computational domain in units sim_unit_to_cm
     sim_time : float, optional
         The simulation time in seconds
-    number_of_particles : int, optional
-        If particle fields are included, set this to the number of particles
 
     Examples
     --------
@@ -438,11 +555,13 @@
     ...     dict(left_edge = [0.0, 0.0, 0.0],
     ...          right_edge = [1.0, 1.0, 1.],
     ...          level = 0,
-    ...          dimensions = [32, 32, 32]),
+    ...          dimensions = [32, 32, 32],
+    ...          number_of_particles = 0)
     ...     dict(left_edge = [0.25, 0.25, 0.25],
     ...          right_edge = [0.75, 0.75, 0.75],
     ...          level = 1,
-    ...          dimensions = [32, 32, 32])
+    ...          dimensions = [32, 32, 32],
+    ...          number_of_particles = 0)
     ... ]
     ... 
     >>> for g in grid_data:
@@ -461,23 +580,27 @@
     grid_left_edges = np.zeros((ngrids, 3), dtype="float32")
     grid_right_edges = np.zeros((ngrids, 3), dtype="float32")
     grid_dimensions = np.zeros((ngrids, 3), dtype="int32")
+    number_of_particles = np.zeros((ngrids,1), dtype='int64')
     sfh = StreamDictFieldHandler()
     for i, g in enumerate(grid_data):
         grid_left_edges[i,:] = g.pop("left_edge")
         grid_right_edges[i,:] = g.pop("right_edge")
         grid_dimensions[i,:] = g.pop("dimensions")
         grid_levels[i,:] = g.pop("level")
+        if g.has_key("number_of_particles") :
+            number_of_particles[i,:] = g.pop("number_of_particles")  
         sfh[i] = g
-
+            
     handler = StreamHandler(
         grid_left_edges,
         grid_right_edges,
         grid_dimensions,
         grid_levels,
         None, # parent_ids is none
-        number_of_particles*np.ones(ngrids, dtype='int64').reshape(ngrids,1),
+        number_of_particles,
         np.zeros(ngrids).reshape((ngrids,1)),
         sfh,
+        particle_types=set_particle_types(grid_data[0])
     )
 
     handler.name = "AMRGridData"
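
(With per-grid particle counts wired through, callers attach particles by
adding a "number_of_particles" entry plus the particle fields to each grid
dict. A small hypothetical usage sketch; both particles sit outside the
refined region, so the level-1 grid carries empty particle arrays:

import numpy as np
from yt.frontends.stream.api import load_amr_grids

grid_data = [
    dict(left_edge = [0.0, 0.0, 0.0], right_edge = [1.0, 1.0, 1.0],
         level = 0, dimensions = [32, 32, 32],
         number_of_particles = 2,
         Density = np.ones((32, 32, 32)),
         particle_position_x = np.array([0.1, 0.2]),
         particle_position_y = np.array([0.1, 0.2]),
         particle_position_z = np.array([0.1, 0.2])),
    dict(left_edge = [0.25, 0.25, 0.25], right_edge = [0.75, 0.75, 0.75],
         level = 1, dimensions = [32, 32, 32],
         number_of_particles = 0,
         Density = np.ones((32, 32, 32)),
         particle_position_x = np.array([]),
         particle_position_y = np.array([]),
         particle_position_z = np.array([])),
]
pf = load_amr_grids(grid_data, [32, 32, 32], 1.0)
)
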
@@ -529,6 +652,20 @@
     >>> ug = load_uniform_grid({'Density': data}, domain_dims, 1.0)
     >>> pf = refine_amr(ug, rc, fo, 5)
     """
+
+    # If we have particle data, set it aside for now
+
+    number_of_particles = np.sum([grid.NumberOfParticles
+                                  for grid in base_pf.h.grids])
+
+    if number_of_particles > 0 :
+        pdata = {}
+        for field in base_pf.h.field_list :
+            if base_pf.field_info[field].particle_type :
+                pdata[field] = np.concatenate([grid[field]
+                                               for grid in base_pf.h.grids])
+        pdata["number_of_particles"] = number_of_particles
+        
     last_gc = base_pf.h.num_grids
     cur_gc = -1
     pf = base_pf    
@@ -545,7 +682,8 @@
                        level = g.Level,
                        dimensions = g.ActiveDimensions )
             for field in pf.h.field_list:
-                gd[field] = g[field]
+                if not pf.field_info[field].particle_type :
+                    gd[field] = g[field]
             grid_data.append(gd)
             if g.Level < pf.h.max_level: continue
             fg = FlaggingGrid(g, refinement_criteria)
@@ -557,8 +695,14 @@
                 gd = dict(left_edge = LE, right_edge = grid.right_edge,
                           level = g.Level + 1, dimensions = dims)
                 for field in pf.h.field_list:
-                    gd[field] = grid[field]
+                    if not pf.field_info[field].particle_type :
+                        gd[field] = grid[field]
                 grid_data.append(gd)
         pf = load_amr_grids(grid_data, pf.domain_dimensions, 1.0)
         cur_gc = pf.h.num_grids
+
+    # Now reassign particle data to grids
+
+    if number_of_particles > 0 : assign_particle_data(pf, pdata)
+    
     return pf
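
(Taken together, refine_amr now pulls the particle fields off the base grids
up front, runs the refinement loop on fluid fields only, and hands the
particles back via assign_particle_data at the end. A usage sketch mirroring
the new test below:

import numpy as np
from yt.frontends.stream.api import load_uniform_grid, refine_amr
import yt.utilities.initial_conditions as ic
import yt.utilities.flagging_methods as fm

num_particles = 1000
data = {"Density": np.random.random((64, 64, 64)),
        "particle_position_x": np.random.uniform(size=num_particles),
        "particle_position_y": np.random.uniform(size=num_particles),
        "particle_position_z": np.random.uniform(size=num_particles),
        "number_of_particles": num_particles}
ug = load_uniform_grid(data, (64, 64, 64), 1.0)
fo = [ic.TopHatSphere(0.1, [0.5, 0.5, 0.5], {"Density": 2.0})]
rc = [fm.flagging_method_registry["overdensity"](1.0)]
pf = refine_amr(ug, rc, fo, 3)
# The particle count should survive the regridding intact.
assert np.sum([g.NumberOfParticles for g in pf.h.grids]) == num_particles
)
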




diff -r e48ea3fd050760cc778fd19a73f2f2580bc8fb8c -r e053d21a0f8ab1cec9376f17bccecf491b674a08 yt/frontends/stream/tests/test_stream_particles.py
--- /dev/null
+++ b/yt/frontends/stream/tests/test_stream_particles.py
@@ -0,0 +1,130 @@
+import numpy as np
+from yt.testing import *
+from yt.frontends.stream.api import load_uniform_grid, refine_amr, load_amr_grids
+import yt.utilities.initial_conditions as ic
+import yt.utilities.flagging_methods as fm
+
+def setup() :
+    pass
+
+# Field information
+
+def test_stream_particles() :
+    
+    num_particles = 100000
+    domain_dims = (64, 64, 64)
+    dens = np.random.random(domain_dims) 
+    x = np.random.uniform(size=num_particles)
+    y = np.random.uniform(size=num_particles)
+    z = np.random.uniform(size=num_particles)
+    m = np.ones((num_particles))
+
+    # Field operators and cell flagging methods
+
+    fo = []
+    fo.append(ic.TopHatSphere(0.1, [0.2,0.3,0.4],{"Density": 2.0}))
+    fo.append(ic.TopHatSphere(0.05, [0.7,0.4,0.75],{"Density": 20.0}))
+    rc = [fm.flagging_method_registry["overdensity"](1.0)]
+    
+    # Check that all of this runs ok without particles
+    
+    ug0 = load_uniform_grid({"Density": dens}, domain_dims, 1.0)
+    ug0 = load_uniform_grid({"Density": dens}, domain_dims, 1.0, nprocs=8)
+    amr0 = refine_amr(ug0, rc, fo, 3)
+
+    grid_data = []
+    
+    for grid in amr0.h.grids :
+        
+        data = dict(left_edge = grid.LeftEdge,
+                    right_edge = grid.RightEdge,
+                    level = grid.Level,
+                    dimensions = grid.ActiveDimensions,
+                    number_of_particles = grid.NumberOfParticles)
+    
+        for field in amr0.h.field_list :
+            
+            data[field] = grid[field]
+            
+        grid_data.append(data)
+
+    amr0 = load_amr_grids(grid_data, domain_dims, 1.0)
+                        
+    # Now add particles
+
+    fields1 = {"Density": dens,
+               "particle_position_x": x,
+               "particle_position_y": y,
+               "particle_position_z": z,
+               "particle_mass": m,
+               "number_of_particles": num_particles}
+
+    fields2 = fields1.copy()
+
+    ug1 = load_uniform_grid(fields1, domain_dims, 1.0)
+    ug2 = load_uniform_grid(fields2, domain_dims, 1.0, nprocs=8)
+
+    # Check to make sure the number of particles is the same
+
+    number_of_particles1 = np.sum([grid.NumberOfParticles for grid in ug1.h.grids])
+    number_of_particles2 = np.sum([grid.NumberOfParticles for grid in ug2.h.grids])
+    
+    assert number_of_particles1 == num_particles
+    assert number_of_particles1 == number_of_particles2
+
+    # Check to make sure the fields have been defined correctly
+    
+    assert ug1.field_info["particle_position_x"].particle_type
+    assert ug1.field_info["particle_position_y"].particle_type
+    assert ug1.field_info["particle_position_z"].particle_type
+    assert ug1.field_info["particle_mass"].particle_type
+    assert not ug1.field_info["Density"].particle_type
+
+    assert ug2.field_info["particle_position_x"].particle_type
+    assert ug2.field_info["particle_position_y"].particle_type
+    assert ug2.field_info["particle_position_z"].particle_type
+    assert ug2.field_info["particle_mass"].particle_type
+    assert not ug2.field_info["Density"].particle_type
+    
+    # Now refine this
+
+    amr1 = refine_amr(ug1, rc, fo, 3)
+    
+    grid_data = []
+    
+    for grid in amr1.h.grids :
+        
+        data = dict(left_edge = grid.LeftEdge,
+                    right_edge = grid.RightEdge,
+                    level = grid.Level,
+                    dimensions = grid.ActiveDimensions,
+                    number_of_particles = grid.NumberOfParticles)
+
+        for field in amr1.h.field_list :
+
+            data[field] = grid[field]
+            
+        grid_data.append(data)
+    
+    amr2 = load_amr_grids(grid_data, domain_dims, 1.0)
+
+    # Check everything again
+
+    number_of_particles1 = [grid.NumberOfParticles for grid in amr1.h.grids]
+    number_of_particles2 = [grid.NumberOfParticles for grid in amr2.h.grids]
+    
+    assert np.sum(number_of_particles1) == num_particles
+    assert_equal(number_of_particles1, number_of_particles2)
+    
+    assert amr1.field_info["particle_position_x"].particle_type
+    assert amr1.field_info["particle_position_y"].particle_type
+    assert amr1.field_info["particle_position_z"].particle_type
+    assert amr1.field_info["particle_mass"].particle_type
+    assert not amr1.field_info["Density"].particle_type
+    
+    assert amr2.field_info["particle_position_x"].particle_type
+    assert amr2.field_info["particle_position_y"].particle_type
+    assert amr2.field_info["particle_position_z"].particle_type
+    assert amr2.field_info["particle_mass"].particle_type
+    assert not amr2.field_info["Density"].particle_type
+


diff -r e48ea3fd050760cc778fd19a73f2f2580bc8fb8c -r e053d21a0f8ab1cec9376f17bccecf491b674a08 yt/frontends/stream/tests/test_update_data.py
--- /dev/null
+++ b/yt/frontends/stream/tests/test_update_data.py
@@ -0,0 +1,23 @@
+from yt.testing import *
+from yt.data_objects.profiles import BinnedProfile1D
+from numpy.random import uniform
+
+def setup():
+    global pf
+    pf = fake_random_pf(64, nprocs=8)
+    pf.h
+    
+def test_update_data() :
+    dims = (32,32,32)
+    grid_data = [{"Temperature":uniform(size=dims)}
+                 for i in xrange(pf.h.num_grids)]
+    pf.h.update_data(grid_data)
+    prj = pf.h.proj(2, "Temperature")
+    prj["Temperature"]
+    dd = pf.h.all_data()
+    profile = BinnedProfile1D(dd, 10, "Density",
+                              dd["Density"].min(),
+                              dd["Density"].max())
+    profile.add_fields(["Temperature"])
+    profile["Temperature"]
+

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the commit notification service enabled for
this repository.


