[yt-svn] commit/yt-3.0: 2 new changesets

Bitbucket commits-noreply at bitbucket.org
Thu Aug 9 19:44:10 PDT 2012


2 new commits in yt-3.0:


https://bitbucket.org/yt_analysis/yt-3.0/changeset/9d5c0e8e54ee/
changeset:   9d5c0e8e54ee
branch:      yt-3.0
user:        MatthewTurk
date:        2012-08-10 01:05:40
summary:     A few more changes to particle IO for Enzo, and a NumberOfActiveParticles
attribute for Enzo grids.
affected #:  2 files

diff -r 7e96504cc5344671a63cbbe689f60b82b882f068 -r 9d5c0e8e54ee125a31bd3518ba46290af10afb97 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -66,7 +66,7 @@
     Class representing a single Enzo Grid instance.
     """
 
-    __slots__ = []
+    __slots__ = ["NumberOfActiveParticles"]
     def __init__(self, id, hierarchy):
         """
         Returns an instance of EnzoGrid with *id*, associated with
@@ -458,6 +458,8 @@
     def _populate_grid_objects(self):
         for g,f in izip(self.grids, self.filenames):
             g._prepare_grid()
+            g.NumberOfActiveParticles = \
+                self.grid_active_particle_count[g.id - g._id_offset,0]
             g._setup_dx()
             g.set_filename(f[0])
             #if g.Parent is not None: g._guess_properties_from_parent()

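For context on the hunk above, here is a minimal, self-contained sketch of how the new attribute gets populated; EnzoGridSketch and the shape of grid_active_particle_count are hypothetical stand-ins for the real hierarchy machinery, which reads the counts from the hierarchy file elsewhere in the frontend:

import numpy as np

class EnzoGridSketch(object):
    # Hypothetical stand-in for EnzoGrid; only the new attribute matters.
    __slots__ = ["id", "_id_offset", "NumberOfActiveParticles"]
    def __init__(self, id):
        self.id = id
        self._id_offset = 1  # Enzo grid ids are 1-indexed in yt

# One row per grid, first column the active particle count, matching
# the [g.id - g._id_offset, 0] indexing in the hunk above.
grid_active_particle_count = np.array([[0], [12], [3]])

grids = [EnzoGridSketch(i + 1) for i in range(3)]
for g in grids:
    g.NumberOfActiveParticles = \
        grid_active_particle_count[g.id - g._id_offset, 0]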

diff -r 7e96504cc5344671a63cbbe689f60b82b882f068 -r 9d5c0e8e54ee125a31bd3518ba46290af10afb97 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -54,6 +54,10 @@
     def _read_exception(self):
         return (exceptions.KeyError, hdf5_light_reader.ReadingError)
 
+    def _read_particle_selection_by_type(self, chunks, selector, fields):
+        raise NotImplementedError
+            
+
     def _read_particle_selection(self, chunks, selector, fields):
         last = None
         rv = {}
@@ -61,8 +65,11 @@
         # Now we have to do something unpleasant
         dobj = chunks[0].dobj
         if any((ftype != "all" for ftype, fname in fields)):
-           return self._read_particle_selection_by_type(
-                    chunks, selector, fields)
+            type_fields = [(ftype, fname) for ftype, fname in fields
+                           if ftype != "all"]
+            rv.update(self._read_particle_selection_by_type(
+                      chunks, selector, fields))
+            if len(rv) == len(fields): return rv
         mylog.debug("First pass: counting particles.")
         xn, yn, zn = ("particle_position_%s" % ax for ax in 'xyz')
         size = 0


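The dispatch this hunk introduces is easier to read in isolation. A minimal sketch with hypothetical names (read_by_type stands in for _read_particle_selection_by_type), written as the code looks after the follow-up fixes in the next changeset; note the comparison must be against the string "all", not the builtin all:

def read_particle_selection(chunks, selector, fields, read_by_type):
    # fields is a list of (ftype, fname) tuples; any field typed
    # something other than "all" goes to the by-type reader first.
    rv = {}
    if any(ftype != "all" for ftype, fname in fields):
        type_fields = [(ftype, fname) for ftype, fname in fields
                       if ftype != "all"]
        rv.update(read_by_type(chunks, selector, type_fields))
        if len(rv) == len(fields):
            return rv  # every requested field was typed
    # ... otherwise fall through to the generic "all"-particle read ...
    return rv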

https://bitbucket.org/yt_analysis/yt-3.0/changeset/0f3bd806c1fd/
changeset:   0f3bd806c1fd
branch:      yt-3.0
user:        MatthewTurk
date:        2012-08-10 04:42:47
summary:     Active particles can now be read.  See bb://MatthewTurk/yt.milestones for more
examples, but this now works:

dd = pf.h.all_data()
print dd["AccretingParticle", "AccretionRate"]
affected #:  4 files

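A side note on the summary's syntax: the two-element index is ordinary Python tuple indexing, so both spellings below hand the same (particle type, field name) key to the data object's __getitem__ (this assumes a pf loaded as in the summary):

dd = pf.h.all_data()
field = ("AccretingParticle", "AccretionRate")
print dd[field]
print dd["AccretingParticle", "AccretionRate"]  # same key, same result
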
diff -r 9d5c0e8e54ee125a31bd3518ba46290af10afb97 -r 0f3bd806c1fdbd8f4080e0a34c7f04ca2f6160c5 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -915,7 +915,7 @@
         else:
             self.current_redshift = self.omega_lambda = self.omega_matter = \
                 self.hubble_constant = self.cosmological_simulation = 0.0
-        self.particle_types = []
+        self.particle_types = ["all"]
         for ptype in self.parameters.get("AppendActiveParticleType", []):
             self.particle_types.append(ptype)
 

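The effect of seeding particle_types with "all" is straightforward; a minimal sketch, assuming a parameter file that registers one active particle type:

parameters = {"AppendActiveParticleType": ["AccretingParticle"]}
particle_types = ["all"]
for ptype in parameters.get("AppendActiveParticleType", []):
    particle_types.append(ptype)
assert particle_types == ["all", "AccretingParticle"]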

diff -r 9d5c0e8e54ee125a31bd3518ba46290af10afb97 -r 0f3bd806c1fdbd8f4080e0a34c7f04ca2f6160c5 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -55,27 +55,61 @@
         return (exceptions.KeyError, hdf5_light_reader.ReadingError)
 
     def _read_particle_selection_by_type(self, chunks, selector, fields):
-        raise NotImplementedError
-            
+        # Active particles don't have the particle_ prefix.
+        rv = {}
+        ptypes = list(set([ftype for ftype, fname in fields]))
+        fields = list(set(fields))
+        if len(ptypes) > 1: raise NotImplementedError
+        pfields = [(ptypes[0], "position_%s" % ax) for ax in 'xyz']
+        size = 0
+        for chunk in chunks:
+            data = self._read_chunk_data(chunk, pfields, 'active', 
+                        "/ActiveParticles/%s" % ptypes[0])
+            for g in chunk.objs:
+                if g.NumberOfActiveParticles == 0: continue
+                x, y, z = (data[g.id].pop(fn) for ft, fn in pfields)
+                size += g.count_particles(selector, x, y, z)
+        read_fields = fields[:]
+        for field in fields:
+            # TODO: figure out dataset types
+            rv[field] = na.empty(size, dtype='float64')
+        for pfield in pfields:
+            if pfield not in fields: read_fields.append(pfield)
+        ind = 0
+        for chunk in chunks:
+            data = self._read_chunk_data(chunk, read_fields, 'active',
+                        "/ActiveParticles/%s" % ptypes[0])
+            for g in chunk.objs:
+                if g.NumberOfActiveParticles == 0: continue
+                x, y, z = (data[g.id][fn] for ft, fn in pfields)
+                mask = g.select_particles(selector, x, y, z)
+                if mask is None: continue
+                for field in set(fields):
+                    ftype, fname = field
+                    gdata = data[g.id].pop(fname)[mask]
+                    rv[field][ind:ind+gdata.size] = gdata
+                ind += gdata.size
+                data.pop(g.id)
+        return rv
 
     def _read_particle_selection(self, chunks, selector, fields):
         last = None
         rv = {}
         chunks = list(chunks)
         # Now we have to do something unpleasant
-        dobj = chunks[0].dobj
         if any((ftype != "all" for ftype, fname in fields)):
             type_fields = [(ftype, fname) for ftype, fname in fields
                            if ftype != "all"]
             rv.update(self._read_particle_selection_by_type(
-                      chunks, selector, fields))
+                      chunks, selector, type_fields))
             if len(rv) == len(fields): return rv
+            fields = [f for f in fields if f not in rv]
         mylog.debug("First pass: counting particles.")
         xn, yn, zn = ("particle_position_%s" % ax for ax in 'xyz')
         size = 0
         pfields = [("all", "particle_position_%s" % ax) for ax in 'xyz']
         for chunk in chunks:
-            data = self._read_chunk_data(chunk, pfields, True)
+            data = self._read_chunk_data(chunk, pfields, 'any')
             for g in chunk.objs:
                 if g.NumberOfParticles == 0: continue
                 x, y, z = (data[g.id].pop("particle_position_%s" % ax)
@@ -92,7 +126,7 @@
                    size, [f2 for f1, f2 in fields], ng)
         ind = 0
         for chunk in chunks:
-            data = self._read_chunk_data(chunk, read_fields, True)
+            data = self._read_chunk_data(chunk, read_fields, 'any')
             for g in chunk.objs:
                 if g.NumberOfParticles == 0: continue
                 x, y, z = (data[g.id]["particle_position_%s" % ax]
@@ -133,18 +167,23 @@
                 data.pop(g.id)
         return rv
 
-    def _read_chunk_data(self, chunk, fields, filter_particles = False):
+    def _read_chunk_data(self, chunk, fields, filter_particles = False,
+                         suffix = ""):
         data = {}
         grids_by_file = defaultdict(list)
         for g in chunk.objs:
-            if filter_particles and g.NumberOfParticles == 0: continue
+            if filter_particles == 'any' and g.NumberOfParticles == 0:
+                continue
+            elif filter_particles == 'active' and \
+                 g.NumberOfActiveParticles == 0:
+                continue
             grids_by_file[g.filename].append(g.id)
         sets = [fname for ftype, fname in fields]
         for filename in grids_by_file:
             nodes = grids_by_file[filename]
             nodes.sort()
             data.update(hdf5_light_reader.ReadMultipleGrids(
-                filename, nodes, sets))
+                filename, nodes, sets, suffix))
         return data
 
 class IOHandlerPackedHDF5GhostZones(IOHandlerPackedHDF5):

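The filter_particles argument has quietly become a three-way mode rather than a boolean. A sketch of the grid filter in isolation (keep_grid is a hypothetical helper name):

def keep_grid(g, filter_particles=False):
    # 'any'    -> drop grids with no particles of any kind
    # 'active' -> drop grids with no active particles
    # False    -> keep every grid
    if filter_particles == 'any' and g.NumberOfParticles == 0:
        return False
    if filter_particles == 'active' and g.NumberOfActiveParticles == 0:
        return False
    return True

A legacy True compares unequal to both mode strings and would now keep every grid, which is why both call sites switch from True to 'any' in the same hunk.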

diff -r 9d5c0e8e54ee125a31bd3518ba46290af10afb97 -r 0f3bd806c1fdbd8f4080e0a34c7f04ca2f6160c5 yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -322,7 +322,8 @@
                     fields_to_read)
         for field in fields_to_read:
             ftype, fname = field
-            conv_factor = self.pf.field_info[fname]._convert_function(self)
+            finfo = dobj._get_field_info(*field)
+            conv_factor = finfo._convert_function(self)
             na.multiply(fields_to_return[field], conv_factor,
                         fields_to_return[field])
         return fields_to_return, fields_to_generate

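The geometry handler fix matters for typed fields: pf.field_info[fname] ignores the field type, while dobj._get_field_info(*field) resolves the type-specific entry. A minimal sketch of the corrected lookup, with FInfo as a hypothetical stand-in for a yt field-info entry:

import numpy as np

class FInfo(object):
    # Hypothetical stand-in for a yt field-info entry.
    def __init__(self, factor):
        self.factor = factor
    def _convert_function(self, handler):
        return self.factor

field_info = {("AccretingParticle", "AccretionRate"): FInfo(0.5)}
fields_to_return = {("AccretingParticle", "AccretionRate"): np.ones(4)}

for field in fields_to_return:
    # Key on the full (ftype, fname) tuple, as the fixed code does via
    # dobj._get_field_info(*field), rather than on fname alone.
    conv_factor = field_info[field]._convert_function(None)
    np.multiply(fields_to_return[field], conv_factor,
                fields_to_return[field])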

diff -r 9d5c0e8e54ee125a31bd3518ba46290af10afb97 -r 0f3bd806c1fdbd8f4080e0a34c7f04ca2f6160c5 yt/utilities/hdf5_light_reader.c
--- a/yt/utilities/hdf5_light_reader.c
+++ b/yt/utilities/hdf5_light_reader.c
@@ -582,13 +582,14 @@
     // Format arguments
 
     char *filename = NULL;
+    char *suffix = "";
     PyObject *grid_ids = NULL;
     PyObject *set_names = NULL;
     Py_ssize_t num_sets = 0;
     Py_ssize_t num_grids = 0;
 
-    if (!PyArg_ParseTuple(args, "sOO",
-            &filename, &grid_ids, &set_names))
+    if (!PyArg_ParseTuple(args, "sOO|s",
+            &filename, &grid_ids, &set_names, &suffix))
         return PyErr_Format(_hdf5ReadError,
                "ReadMultipleGrids: Invalid parameters.");
 
@@ -604,7 +605,7 @@
     file_id = grid_node = 0;
     int i, n;
     long id;
-    char grid_node_name[13]; // Grid + 8 + \0
+    char grid_node_name[256]; // "Grid" + 8 digits + optional suffix + \0
 
     /* Similar to the way Enzo does it, we're going to set the file access
        property to store bigger bits in RAM. */
@@ -621,11 +622,12 @@
     for(i = 0; i < num_grids; i++) {
         grid_key = PyList_GetItem(grid_ids, i);
         id = PyInt_AsLong(grid_key);
-        sprintf(grid_node_name, "Grid%08li", id);
+        sprintf(grid_node_name, "Grid%08li%s", id, suffix);
         grid_data = PyDict_New(); // New reference
         PyDict_SetItem(grids_dict, grid_key, grid_data);
         grid_node = H5Gopen(file_id, grid_node_name);
         if (grid_node < 0) {
+              H5Eprint(stderr);
               PyErr_Format(_hdf5ReadError,
                   "ReadHDF5DataSet: Error opening (%s, %s)",
                   filename, grid_node_name);

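From the Python side, the new optional fourth argument is appended to each Grid%08li node name; a hedged usage sketch, where the filename and grid id are placeholders:

from yt.utilities import hdf5_light_reader

# With this suffix, the AccretionRate dataset is read from under
# /Grid00000042/ActiveParticles/AccretingParticle; omitting the fourth
# argument leaves the old Grid%08li behavior unchanged.
data = hdf5_light_reader.ReadMultipleGrids(
    "data0042.cpu0000", [42], ["AccretionRate"],
    "/ActiveParticles/AccretingParticle")
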
Repository URL: https://bitbucket.org/yt_analysis/yt-3.0/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


