[yt-svn] commit/yt-3.0: 9 new changesets

commits-noreply at bitbucket.org
Sat Jul 27 15:13:44 PDT 2013


9 new commits in yt-3.0:

https://bitbucket.org/yt_analysis/yt-3.0/commits/ba71fd3be64a/
Changeset:   ba71fd3be64a
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-26 01:01:00
Summary:     Adding ability to update particle field values in deposit.
Affected #:  2 files

diff -r 7ffc40eb23159d00fa634703480757d0fed7088a -r ba71fd3be64a04b2f28219892d237f2c290b2f59 yt/geometry/particle_deposit.pxd
--- a/yt/geometry/particle_deposit.pxd
+++ b/yt/geometry/particle_deposit.pxd
@@ -63,6 +63,7 @@
     # We assume each will allocate and define their own temporary storage
     cdef public object nvals
     cdef public int bad_indices
+    cdef int update_values
     cdef void process(self, int dim[3], np.float64_t left_edge[3],
                       np.float64_t dds[3], np.int64_t offset,
                       np.float64_t ppos[3], np.float64_t *fields)

diff -r 7ffc40eb23159d00fa634703480757d0fed7088a -r ba71fd3be64a04b2f28219892d237f2c290b2f59 yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -38,6 +38,7 @@
 cdef class ParticleDepositOperation:
     def __init__(self, nvals):
         self.nvals = nvals
+        self.update_values = 0 # This is the default
 
     def initialize(self, *args):
         raise NotImplementedError
@@ -102,6 +103,9 @@
             # Check that we found the oct ...
             self.process(dims, oi.left_edge, oi.dds,
                          offset, pos, field_vals)
+            if self.update_values == 1:
+                for j in range(nf):
+                    field_pointers[j][i] = field_vals[j] 
         
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -132,6 +136,9 @@
             for j in range(3):
                 pos[j] = positions[i, j]
             self.process(dims, left_edge, dds, 0, pos, field_vals)
+            if self.update_values == 1:
+                for j in range(nf):
+                    field_pointers[j][i] = field_vals[j] 
 
     cdef void process(self, int dim[3], np.float64_t left_edge[3],
                       np.float64_t dds[3], np.int64_t offset,
@@ -375,7 +382,7 @@
         self.ow = np.zeros(self.nvals, dtype='float64', order='F')
         cdef np.ndarray warr = self.ow
         self.w = <np.float64_t*> warr.data
-    
+
     @cython.cdivision(True)
     cdef void process(self, int dim[3],
                       np.float64_t left_edge[3], 
@@ -393,5 +400,4 @@
     def finalize(self):
         return self.owf / self.ow
 
-deposit_weighted_mean= WeightedMeanParticleField
-
+deposit_weighted_mean = WeightedMeanParticleField
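
The key addition in this changeset is the update_values flag: when a deposit
operation sets it to 1, the per-particle value buffer is copied back into the
particle field arrays after each process() call, so an operation can return
results per particle rather than per cell.  A minimal pure-Python sketch of
that write-back path (illustrative names only; the real code is the Cython
loops in the diff above):

    def deposit_particles(op, positions, particle_fields):
        # particle_fields: list of 1D NumPy arrays, one value per particle
        for i, pos in enumerate(positions):
            vals = [f[i] for f in particle_fields]  # gather this particle's values
            op.process(pos, vals)                   # kernel may modify vals in place
            if op.update_values:                    # new flag, default 0
                for j, f in enumerate(particle_fields):
                    f[i] = vals[j]                  # write results back to the particle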


https://bitbucket.org/yt_analysis/yt-3.0/commits/84b51f709758/
Changeset:   84b51f709758
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-26 02:00:48
Summary:     Adding box identifier deposition function.
Affected #:  5 files

diff -r ba71fd3be64a04b2f28219892d237f2c290b2f59 -r 84b51f709758677dbaa0a46442389a528e6be58d yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -489,6 +489,7 @@
         op.initialize()
         op.process_grid(self, positions, fields)
         vals = op.finalize()
+        if vals is None: return
         return vals.reshape(self.ActiveDimensions, order="C")
 
     def _get_selector_mask(self, selector):

diff -r ba71fd3be64a04b2f28219892d237f2c290b2f59 -r 84b51f709758677dbaa0a46442389a528e6be58d yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -121,6 +121,7 @@
         op.process_octree(self.oct_handler, self.domain_ind, positions, fields,
             self.domain_id, self._domain_offset)
         vals = op.finalize()
+        if vals is None: return
         return np.asfortranarray(vals)
 
     def select_icoords(self, dobj):

diff -r ba71fd3be64a04b2f28219892d237f2c290b2f59 -r 84b51f709758677dbaa0a46442389a528e6be58d yt/data_objects/particle_fields.py
--- a/yt/data_objects/particle_fields.py
+++ b/yt/data_objects/particle_fields.py
@@ -113,6 +113,19 @@
             particle_type = True,
             units = r"\mathrm{M}_\odot")
 
+    def particle_box_ids(field, data):
+        pos = data[ptype, coord_name]
+        ids = np.zeros(pos.shape[0], dtype="float64") - 1
+        # This is float64 in name only.  It will be properly cast inside the
+        # deposit operation.
+        #_ids = ids.view("float64")
+        data.deposit(pos, [ids], method = "box_identifier")
+        return ids
+    registry.add_field((ptype, "BoxIdentifier"),
+            function= particle_box_ids,
+            validators = [ValidateSpatial()],
+            particle_type = True)
+
     return list(set(registry.keys()).difference(orig))
 
 

diff -r ba71fd3be64a04b2f28219892d237f2c290b2f59 -r 84b51f709758677dbaa0a46442389a528e6be58d yt/geometry/particle_deposit.pxd
--- a/yt/geometry/particle_deposit.pxd
+++ b/yt/geometry/particle_deposit.pxd
@@ -66,4 +66,5 @@
     cdef int update_values
     cdef void process(self, int dim[3], np.float64_t left_edge[3],
                       np.float64_t dds[3], np.int64_t offset,
-                      np.float64_t ppos[3], np.float64_t *fields)
+                      np.float64_t ppos[3], np.float64_t *fields,
+                      np.int64_t domain_ind)

diff -r ba71fd3be64a04b2f28219892d237f2c290b2f59 -r 84b51f709758677dbaa0a46442389a528e6be58d yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -102,7 +102,7 @@
             if offset < 0: continue
             # Check that we found the oct ...
             self.process(dims, oi.left_edge, oi.dds,
-                         offset, pos, field_vals)
+                         offset, pos, field_vals, oct.domain_ind)
             if self.update_values == 1:
                 for j in range(nf):
                     field_pointers[j][i] = field_vals[j] 
@@ -120,6 +120,7 @@
         cdef np.ndarray[np.float64_t, ndim=1] tarr
         field_pointers = <np.float64_t**> alloca(sizeof(np.float64_t *) * nf)
         field_vals = <np.float64_t*>alloca(sizeof(np.float64_t) * nf)
+        cdef np.int64_t gid = gobj.id
         for i in range(nf):
             tarr = fields[i]
             field_pointers[i] = <np.float64_t *> tarr.data
@@ -135,14 +136,15 @@
                 field_vals[j] = field_pointers[j][i]
             for j in range(3):
                 pos[j] = positions[i, j]
-            self.process(dims, left_edge, dds, 0, pos, field_vals)
+            self.process(dims, left_edge, dds, 0, pos, field_vals, gid)
             if self.update_values == 1:
                 for j in range(nf):
                     field_pointers[j][i] = field_vals[j] 
 
     cdef void process(self, int dim[3], np.float64_t left_edge[3],
                       np.float64_t dds[3], np.int64_t offset,
-                      np.float64_t ppos[3], np.float64_t *fields):
+                      np.float64_t ppos[3], np.float64_t *fields,
+                      np.int64_t domain_ind):
         raise NotImplementedError
 
 cdef class CountParticles(ParticleDepositOperation):
@@ -161,7 +163,8 @@
                       np.float64_t dds[3],
                       np.int64_t offset, # offset into IO field
                       np.float64_t ppos[3], # this particle's position
-                      np.float64_t *fields # any other fields we need
+                      np.float64_t *fields,
+                      np.int64_t domain_ind
                       ):
         # here we do our thing; this is the kernel
         cdef int ii[3], i
@@ -197,7 +200,8 @@
                       np.float64_t dds[3],
                       np.int64_t offset,
                       np.float64_t ppos[3],
-                      np.float64_t *fields
+                      np.float64_t *fields,
+                      np.int64_t domain_ind
                       ):
         cdef int ii[3], half_len, ib0[3], ib1[3]
         cdef int i, j, k
@@ -250,7 +254,8 @@
                       np.float64_t dds[3],
                       np.int64_t offset, 
                       np.float64_t ppos[3],
-                      np.float64_t *fields 
+                      np.float64_t *fields,
+                      np.int64_t domain_ind
                       ):
         cdef int ii[3], i
         for i in range(3):
@@ -296,7 +301,8 @@
                       np.float64_t dds[3],
                       np.int64_t offset,
                       np.float64_t ppos[3],
-                      np.float64_t *fields
+                      np.float64_t *fields,
+                      np.int64_t domain_ind
                       ):
         cdef int ii[3], i, cell_index
         cdef float k, mk, qk
@@ -338,7 +344,8 @@
                       np.float64_t dds[3],
                       np.int64_t offset, # offset into IO field
                       np.float64_t ppos[3], # this particle's position
-                      np.float64_t *fields # any other fields we need
+                      np.float64_t *fields,
+                      np.int64_t domain_ind
                       ):
         
         cdef int i, j, k, ind[3], ii
@@ -389,7 +396,8 @@
                       np.float64_t dds[3],
                       np.int64_t offset, 
                       np.float64_t ppos[3],
-                      np.float64_t *fields 
+                      np.float64_t *fields,
+                      np.int64_t domain_ind
                       ):
         cdef int ii[3], i
         for i in range(3):
@@ -401,3 +409,26 @@
         return self.owf / self.ow
 
 deposit_weighted_mean = WeightedMeanParticleField
+
+cdef class BoxIdentifier(ParticleDepositOperation):
+    # This is a tricky one!  What it does is put into the particle array the
+    # value of the oct or block (grids will always be zero) identifier that a
+    # given particle resides in
+    def initialize(self):
+        self.update_values = 1
+
+    @cython.cdivision(True)
+    cdef void process(self, int dim[3],
+                      np.float64_t left_edge[3], 
+                      np.float64_t dds[3],
+                      np.int64_t offset, 
+                      np.float64_t ppos[3],
+                      np.float64_t *fields,
+                      np.int64_t domain_ind
+                      ):
+        fields[0] = domain_ind
+
+    def finalize(self):
+        return
+
+deposit_box_identifier = BoxIdentifier
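
With the deposit operation and field registered above, the identifier can be
read like any other spatially-validated particle field.  A hedged usage
sketch, assuming a particle dataset pf has been loaded elsewhere (the field
and deposit method are renamed to "mesh_id" later in this series):

    dd = pf.h.all_data()
    ids = dd["all", "BoxIdentifier"]  # one oct/block identifier per particle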


https://bitbucket.org/yt_analysis/yt-3.0/commits/a0d9555cf0ce/
Changeset:   a0d9555cf0ce
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-26 02:08:11
Summary:     Fix bug when filtering particles and assigning child indices.

Using the new box identifier, I looked at the histogram of particles-in-boxes.
This exceeded n_ref by a good factor.  Delving deeper, I realized that there
was an asymmetry in how levels were assigned and handled depending on whether
octs were refined or not.  This resolves that asymmetry.
Affected #:  1 file

diff -r 84b51f709758677dbaa0a46442389a528e6be58d -r a0d9555cf0ce78ee908778a21b80c32ef052483e yt/geometry/particle_oct_container.pyx
--- a/yt/geometry/particle_oct_container.pyx
+++ b/yt/geometry/particle_oct_container.pyx
@@ -190,7 +190,7 @@
                 if cur.children == NULL or \
                    cur.children[cind(ind[0],ind[1],ind[2])] == NULL:
                     cur = self.refine_oct(cur, index, level)
-                    self.filter_particles(cur, data, p, level + 1)
+                    self.filter_particles(cur, data, p, level)
                 else:
                     cur = cur.children[cind(ind[0],ind[1],ind[2])]
             cur.file_ind += 1
@@ -215,7 +215,7 @@
                     o.children[cind(i,j,k)] = noct
         o.file_ind = self.n_ref + 1
         for i in range(3):
-            ind[i] = (index >> ((ORDER_MAX - (level + 1))*3 + (2 - i))) & 1
+            ind[i] = (index >> ((ORDER_MAX - level)*3 + (2 - i))) & 1
         noct = o.children[cind(ind[0],ind[1],ind[2])]
         return noct
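
A quick check of the kind this fix addresses, mirroring the test added later
in this series (assumes pf is a particle dataset loaded elsewhere; at this
point in the series n_ref is still hard-coded to 64):

    import numpy as np

    dd = pf.h.all_data()
    bi = dd["all", "BoxIdentifier"].astype("int64")
    counts = np.bincount(bi)   # particles per oct/block
    assert counts.max() <= 64  # before this fix, some octs held far more than n_ref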
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/f452958964ee/
Changeset:   f452958964ee
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-26 02:16:57
Summary:     Arbitrary grids and other mock grids don't have IDs.  Set the ID to -1 to
indicate this.
Affected #:  1 file

diff -r a0d9555cf0ce78ee908778a21b80c32ef052483e -r f452958964eef3cb8b2788c97cc88d60ceda5198 yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -120,7 +120,7 @@
         cdef np.ndarray[np.float64_t, ndim=1] tarr
         field_pointers = <np.float64_t**> alloca(sizeof(np.float64_t *) * nf)
         field_vals = <np.float64_t*>alloca(sizeof(np.float64_t) * nf)
-        cdef np.int64_t gid = gobj.id
+        cdef np.int64_t gid = getattr(gobj, "id", -1)
         for i in range(nf):
             tarr = fields[i]
             field_pointers[i] = <np.float64_t *> tarr.data


https://bitbucket.org/yt_analysis/yt-3.0/commits/e17a67c00413/
Changeset:   e17a67c00413
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-26 20:12:30
Summary:     Adding n_ref as an option to particle constructors.
Affected #:  3 files

diff -r f452958964eef3cb8b2788c97cc88d60ceda5198 -r e17a67c004133257489ccceb8b5ddaad7d398804 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -151,7 +151,8 @@
 
     def __init__(self, filename, data_style="gadget_binary",
                  additional_fields = (),
-                 unit_base = None):
+                 unit_base = None, n_ref = 64):
+        self.n_ref = n_ref
         self.storage_filename = None
         if unit_base is not None and "UnitLength_in_cm" in unit_base:
             # We assume this is comoving, because in the absence of comoving
@@ -264,10 +265,11 @@
     _particle_coordinates_name = "Coordinates"
     _header_spec = None # Override so that there's no confusion
 
-    def __init__(self, filename, data_style="OWLS"):
+    def __init__(self, filename, data_style="OWLS", n_ref = 64):
         self.storage_filename = None
         super(OWLSStaticOutput, self).__init__(filename, data_style,
-                                               unit_base = None)
+                                               unit_base = None,
+                                               n_ref = n_ref)
 
     def __repr__(self):
         return os.path.basename(self.parameter_filename).split(".")[0]
@@ -357,7 +359,9 @@
                  domain_left_edge = None,
                  domain_right_edge = None,
                  unit_base = None,
-                 cosmology_parameters = None):
+                 cosmology_parameters = None,
+                 n_ref = 64):
+        self.n_ref = n_ref
         self.endian = endian
         self.storage_filename = None
         if domain_left_edge is None:

diff -r f452958964eef3cb8b2788c97cc88d60ceda5198 -r e17a67c004133257489ccceb8b5ddaad7d398804 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -733,9 +733,11 @@
     _data_style = "stream_particles"
     file_count = 1
     filename_template = "stream_file"
+    n_ref = 64
 
 def load_particles(data, sim_unit_to_cm, bbox=None,
-                      sim_time=0.0, periodicity=(True, True, True)):
+                      sim_time=0.0, periodicity=(True, True, True),
+                      n_ref = 64):
     r"""Load a set of particles into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamParticleHandler`.
 
@@ -764,6 +766,9 @@
     periodicity : tuple of booleans
         Determines whether the data will be treated as periodic along
         each axis
+    n_ref : int
+        The number of particles that result in refining an oct used for
+        indexing the particles.
 
     Examples
     --------
@@ -818,6 +823,7 @@
     handler.cosmology_simulation = 0
 
     spf = StreamParticlesStaticOutput(handler)
+    spf.n_ref = n_ref
     spf.units["cm"] = sim_unit_to_cm
     spf.units['1'] = 1.0
     spf.units["unitary"] = 1.0

diff -r f452958964eef3cb8b2788c97cc88d60ceda5198 -r e17a67c004133257489ccceb8b5ddaad7d398804 yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -87,7 +87,7 @@
         pf = self.parameter_file
         self.oct_handler = ParticleOctreeContainer(
             [1, 1, 1], pf.domain_left_edge, pf.domain_right_edge)
-        self.oct_handler.n_ref = 64
+        self.oct_handler.n_ref = pf.n_ref
         mylog.info("Allocating for %0.3e particles", self.total_particles)
         # No more than 256^3 in the region finder.
         N = min(len(self.data_files), 256) 
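
A short usage sketch of the new keyword, patterned on the test added two
changesets later (the particle positions below are placeholders, not real
data):

    import numpy as np
    from yt.frontends.stream.data_structures import load_particles

    n = 32**3
    data = dict(("particle_position_%s" % ax, np.random.random(n))
                for ax in "xyz")
    bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]])

    # n_ref sets how many particles an oct may hold before it is refined.
    pf = load_particles(data, 1.0, bbox=bbox, n_ref=128)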


https://bitbucket.org/yt_analysis/yt-3.0/commits/06fcd4b850b2/
Changeset:   06fcd4b850b2
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-26 20:17:18
Summary:     Changing box ids to mesh ids, and changing case.
Affected #:  2 files

diff -r e17a67c004133257489ccceb8b5ddaad7d398804 -r 06fcd4b850b288f0f868de12ec7b9c9a150594b5 yt/data_objects/particle_fields.py
--- a/yt/data_objects/particle_fields.py
+++ b/yt/data_objects/particle_fields.py
@@ -113,16 +113,16 @@
             particle_type = True,
             units = r"\mathrm{M}_\odot")
 
-    def particle_box_ids(field, data):
+    def particle_mesh_ids(field, data):
         pos = data[ptype, coord_name]
         ids = np.zeros(pos.shape[0], dtype="float64") - 1
         # This is float64 in name only.  It will be properly cast inside the
         # deposit operation.
         #_ids = ids.view("float64")
-        data.deposit(pos, [ids], method = "box_identifier")
+        data.deposit(pos, [ids], method = "mesh_id")
         return ids
-    registry.add_field((ptype, "BoxIdentifier"),
-            function= particle_box_ids,
+    registry.add_field((ptype, "mesh_id"),
+            function = particle_mesh_ids,
             validators = [ValidateSpatial()],
             particle_type = True)
 

diff -r e17a67c004133257489ccceb8b5ddaad7d398804 -r 06fcd4b850b288f0f868de12ec7b9c9a150594b5 yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -410,7 +410,7 @@
 
 deposit_weighted_mean = WeightedMeanParticleField
 
-cdef class BoxIdentifier(ParticleDepositOperation):
+cdef class MeshIdentifier(ParticleDepositOperation):
     # This is a tricky one!  What it does is put into the particle array the
     # value of the oct or block (grids will always be zero) identifier that a
     # given particle resides in
@@ -431,4 +431,4 @@
     def finalize(self):
         return
 
-deposit_box_identifier = BoxIdentifier
+deposit_mesh_id = MeshIdentifier


https://bitbucket.org/yt_analysis/yt-3.0/commits/d862e626b627/
Changeset:   d862e626b627
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-27 01:15:55
Summary:     Updating array values for particle octree tests.
Affected #:  1 file

diff -r 06fcd4b850b288f0f868de12ec7b9c9a150594b5 -r d862e626b627e079e2f45fae98e16c9f12c6de3f yt/geometry/tests/test_particle_octree.py
--- a/yt/geometry/tests/test_particle_octree.py
+++ b/yt/geometry/tests/test_particle_octree.py
@@ -35,7 +35,7 @@
         # This visits every cell -- including those covered by octs.
         #for dom in range(ndom):
         #    level_count += octree.count_levels(total_count.size-1, dom, mask)
-        yield assert_equal, total_count, [1, 8, 64, 104, 184, 480, 1680, 1480]
+        yield assert_equal, total_count, [1, 8, 64, 64, 256, 536, 1856, 1672]
 
 if __name__=="__main__":
     for i in test_add_particles_random():


https://bitbucket.org/yt_analysis/yt-3.0/commits/1c75bda6fe8c/
Changeset:   1c75bda6fe8c
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-27 04:41:08
Summary:     Adding n_ref tests for particle counts in Octs.
Affected #:  2 files

diff -r d862e626b627e079e2f45fae98e16c9f12c6de3f -r 1c75bda6fe8c71e450a92684c7ca87afd1157c65 yt/frontends/stream/api.py
--- a/yt/frontends/stream/api.py
+++ b/yt/frontends/stream/api.py
@@ -31,6 +31,7 @@
       StreamHandler, \
       load_uniform_grid, \
       load_amr_grids, \
+      load_particles, \
       refine_amr
 
 from .fields import \

diff -r d862e626b627e079e2f45fae98e16c9f12c6de3f -r 1c75bda6fe8c71e450a92684c7ca87afd1157c65 yt/geometry/tests/test_particle_octree.py
--- a/yt/geometry/tests/test_particle_octree.py
+++ b/yt/geometry/tests/test_particle_octree.py
@@ -3,6 +3,8 @@
 from yt.geometry.particle_oct_container import ParticleOctreeContainer
 from yt.geometry.oct_container import _ORDER_MAX
 from yt.utilities.lib.geometry_utils import get_morton_indices
+from yt.frontends.stream.api import load_particles
+import yt.data_objects.api
 import time, os
 
 NPART = 32**3
@@ -37,6 +39,26 @@
         #    level_count += octree.count_levels(total_count.size-1, dom, mask)
         yield assert_equal, total_count, [1, 8, 64, 64, 256, 536, 1856, 1672]
 
+def test_particle_octree_counts():
+    np.random.seed(int(0x4d3d3d3))
+    # Eight times as many!
+    pos = []
+    data = {}
+    bbox = []
+    for i, ax in enumerate('xyz'):
+        DW = DRE[i] - DLE[i]
+        LE = DLE[i]
+        data["particle_position_%s" % ax] = \
+            np.random.normal(0.5, scale=0.05, size=(NPART*8)) * DW + LE
+        bbox.append( [DLE[i], DRE[i]] )
+    bbox = np.array(bbox)
+    for n_ref in [16, 32, 64, 512, 1024]:
+        pf = load_particles(data, 1.0, bbox = bbox, n_ref = n_ref)
+        dd = pf.h.all_data()
+        bi = dd["all","mesh_id"]
+        v = np.bincount(bi.astype("int64"))
+        yield assert_equal, v.max() <= n_ref, True
+
 if __name__=="__main__":
     for i in test_add_particles_random():
         i[0](*i[1:])


https://bitbucket.org/yt_analysis/yt-3.0/commits/c7f0b4c36217/
Changeset:   c7f0b4c36217
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-28 00:13:25
Summary:     Merging with Nathan's tip
Affected #:  4 files

diff -r 1c75bda6fe8c71e450a92684c7ca87afd1157c65 -r c7f0b4c36217f18e0b4bb4fdd763b8bab4cade69 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -12,13 +12,16 @@
 yt/frontends/sph/smoothing_kernel.c
 yt/geometry/fake_octree.c
 yt/geometry/oct_container.c
+yt/geometry/oct_visitors.c
 yt/geometry/particle_deposit.c
+yt/geometry/particle_oct_container.c
 yt/geometry/selection_routines.c
 yt/utilities/amr_utils.c
 yt/utilities/kdtree/forthonf2c.h
 yt/utilities/libconfig_wrapper.c
 yt/utilities/spatial/ckdtree.c
 yt/utilities/lib/alt_ray_tracers.c
+yt/utilities/lib/amr_kdtools.c
 yt/utilities/lib/CICDeposit.c
 yt/utilities/lib/ContourFinding.c
 yt/utilities/lib/DepthFirstOctree.c

diff -r 1c75bda6fe8c71e450a92684c7ca87afd1157c65 -r c7f0b4c36217f18e0b4bb4fdd763b8bab4cade69 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -675,7 +675,7 @@
 
 class EnzoHierarchy1D(EnzoHierarchy):
 
-    def _fill_arrays(self, ei, si, LE, RE, npart):
+    def _fill_arrays(self, ei, si, LE, RE, npart, nap):
         self.grid_dimensions[:,:1] = ei
         self.grid_dimensions[:,:1] -= np.array(si, self.float_type)
         self.grid_dimensions += 1
@@ -685,10 +685,12 @@
         self.grid_left_edge[:,1:] = 0.0
         self.grid_right_edge[:,1:] = 1.0
         self.grid_dimensions[:,1:] = 1
+        if nap is not None:
+            raise NotImplementedError
 
 class EnzoHierarchy2D(EnzoHierarchy):
 
-    def _fill_arrays(self, ei, si, LE, RE, npart):
+    def _fill_arrays(self, ei, si, LE, RE, npart, nap):
         self.grid_dimensions[:,:2] = ei
         self.grid_dimensions[:,:2] -= np.array(si, self.float_type)
         self.grid_dimensions += 1
@@ -698,6 +700,8 @@
         self.grid_left_edge[:,2] = 0.0
         self.grid_right_edge[:,2] = 1.0
         self.grid_dimensions[:,2] = 1
+        if nap is not None:
+            raise NotImplementedError
 
 class EnzoStaticOutput(StaticOutput):
     """

diff -r 1c75bda6fe8c71e450a92684c7ca87afd1157c65 -r c7f0b4c36217f18e0b4bb4fdd763b8bab4cade69 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -277,6 +277,33 @@
                         (grid.id, field)).transpose()
         return t
 
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        rv = {}
+        # Now we have to do something unpleasant
+        chunks = list(chunks)
+        if selector.__class__.__name__ == "GridSelector":
+            return self._read_grid_chunk(chunks, fields)
+        if any((ftype != "gas" for ftype, fname in fields)):
+            raise NotImplementedError
+        for field in fields:
+            ftype, fname = field
+            fsize = size
+            rv[field] = np.empty(fsize, dtype="float64")
+        ng = sum(len(c.objs) for c in chunks)
+        mylog.debug("Reading %s cells of %s fields in %s grids",
+                   size, [f2 for f1, f2 in fields], ng)
+        ind = 0
+        for chunk in chunks:
+            data = self._read_chunk_data(chunk, fields)
+            for g in chunk.objs:
+                for field in fields:
+                    ftype, fname = field
+                    ds = np.atleast_3d(data[g.id].pop(fname))
+                    nd = g.select(selector, ds, rv[field], ind)  # caches
+                ind += nd
+                data.pop(g.id)
+        return rv
+
 
 class IOHandlerPacked1D(IOHandlerPackedHDF5):
 

diff -r 1c75bda6fe8c71e450a92684c7ca87afd1157c65 -r c7f0b4c36217f18e0b4bb4fdd763b8bab4cade69 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -28,6 +28,7 @@
 import stat
 import weakref
 import struct
+import glob
 from itertools import izip
 
 from yt.utilities.fortran_utils import read_record
@@ -46,6 +47,7 @@
     G, \
     gravitational_constant_cgs, \
     km_per_pc, \
+    cm_per_kpc, \
     mass_sun_cgs
 from yt.utilities.cosmology import Cosmology
 from .fields import \
@@ -107,11 +109,12 @@
         mpch = {}
         mpch.update(mpc_conversion)
         unit_base = self._unit_base or {}
-        for unit in mpc_conversion:
-            mpch['%sh' % unit] = mpch[unit] * self.hubble_constant
-            mpch['%shcm' % unit] = (mpch["%sh" % unit] / 
-                    (1 + self.current_redshift))
-            mpch['%scm' % unit] = mpch[unit] / (1 + self.current_redshift)
+        if self.cosmological_simulation:
+            for unit in mpc_conversion:
+                mpch['%sh' % unit] = mpch[unit] * self.hubble_constant
+                mpch['%shcm' % unit] = (mpch["%sh" % unit] /
+                                (1 + self.current_redshift))
+                mpch['%scm' % unit] = mpch[unit] / (1 + self.current_redshift)
         # ud == unit destination
         # ur == unit registry
         for ud, ur in [(self.units, mpch), (self.time_units, sec_conversion)]:
@@ -360,6 +363,7 @@
                  domain_right_edge = None,
                  unit_base = None,
                  cosmology_parameters = None,
+                 parameter_file = None,
                  n_ref = 64):
         self.n_ref = n_ref
         self.endian = endian
@@ -379,6 +383,7 @@
 
         self._unit_base = unit_base or {}
         self._cosmology_parameters = cosmology_parameters
+        self._param_file = parameter_file
         super(TipsyStaticOutput, self).__init__(filename, data_style)
 
     def __repr__(self):
@@ -386,44 +391,74 @@
 
     def _parse_parameter_file(self):
 
-        # The entries in this header are capitalized and named to match Table 4
-        # in the GADGET-2 user guide.
+        # Parsing the header of the tipsy file, from this we obtain
+        # the snapshot time and particle counts.
 
         f = open(self.parameter_filename, "rb")
         hh = self.endian + "".join(["%s" % (b) for a,b in self._header_spec])
         hvals = dict([(a, c) for (a, b), c in zip(self._header_spec,
                      struct.unpack(hh, f.read(struct.calcsize(hh))))])
+        self.parameters.update(hvals)
         self._header_offset = f.tell()
 
+        # These are always true, for now.
         self.dimensionality = 3
         self.refine_by = 2
         self.parameters["HydroMethod"] = "sph"
+
         self.unique_identifier = \
             int(os.stat(self.parameter_filename)[stat.ST_CTIME])
-        # Set standard values
 
-        # This may not be correct.
+        # Read in parameter file, if available.
+        if self._param_file is None:
+            pfn = glob.glob(os.path.join(self.directory, "*.param"))
+            assert len(pfn) < 2, \
+                "More than one param file is in the data directory"
+            if pfn == []:
+                pfn = None
+            else:
+                pfn = pfn[0]
+        else:
+            pfn = self._param_file
+
+        if pfn is not None:
+            for line in (l.strip() for l in open(pfn)):
+                # skip comment lines and blank lines
+                l = line.strip()
+                if l.startswith('#') or l == '':
+                    continue
+                # parse parameters according to tipsy parameter type
+                param, val = (i.strip() for i in line.split('=',1))
+                if param.startswith('n') or param.startswith('i'):
+                    val = long(val)
+                elif param.startswith('d'):
+                    val = float(val)
+                elif param.startswith('b'):
+                    val = bool(float(val))
+                self.parameters[param] = val
+
         self.current_time = hvals["time"]
+        self.domain_dimensions = np.ones(3, "int32") * 2
+        if self.parameters.get('bPeriodic', True):
+            self.periodicity = (True, True, True)
+        else:
+            self.periodicity = (False, False, False)
 
-        # NOTE: These are now set in the main initializer.
-        #self.domain_left_edge = np.zeros(3, "float64") - 0.5
-        #self.domain_right_edge = np.ones(3, "float64") + 0.5
-        self.domain_dimensions = np.ones(3, "int32") * 2
-        self.periodicity = (True, True, True)
-
-        self.cosmological_simulation = 1
-
-        cosm = self._cosmology_parameters or {}
-        dcosm = dict(current_redshift = 0.0,
-                     omega_lambda = 0.0,
-                     omega_matter = 0.0,
-                     hubble_constant = 1.0)
-        for param in ['current_redshift', 'omega_lambda',
-                      'omega_matter', 'hubble_constant']:
-            pval = cosm.get(param, dcosm[param])
-            setattr(self, param, pval)
-
-        self.parameters = hvals
+        if self.parameters.get('bComove', True):
+            self.cosmological_simulation = 1
+            cosm = self._cosmology_parameters or {}
+            dcosm = dict(current_redshift = 0.0,
+                         omega_lambda = 0.0,
+                         omega_matter = 0.0,
+                         hubble_constant = 1.0)
+            for param in ['current_redshift', 'omega_lambda',
+                          'omega_matter', 'hubble_constant']:
+                pval = cosm.get(param, dcosm[param])
+                setattr(self, param, pval)
+        else:
+            self.cosmological_simulation = 0.0
+            kpc_unit = self.parameters.get('dKpcUnit', 1.0)
+            self._unit_base['cm'] = 1.0 / (kpc_unit * cm_per_kpc)
 
         self.filename_template = self.parameter_filename
         self.file_count = 1
@@ -432,12 +467,17 @@
 
     def _set_units(self):
         super(TipsyStaticOutput, self)._set_units()
-        DW = (self.domain_right_edge - self.domain_left_edge).max()
-        cosmo = Cosmology(self.hubble_constant * 100.0,
-                          self.omega_matter, self.omega_lambda)
-        length_unit = DW * self.units['cm'] # Get it in proper cm
-        density_unit = cosmo.CriticalDensity(self.current_redshift)
-        mass_unit = density_unit * length_unit**3
+        if self.cosmological_simulation:
+            DW = (self.domain_right_edge - self.domain_left_edge).max()
+            cosmo = Cosmology(self.hubble_constant * 100.0,
+                              self.omega_matter, self.omega_lambda)
+            length_unit = DW * self.units['cm'] # Get it in proper cm
+            density_unit = cosmo.CriticalDensity(self.current_redshift)
+            mass_unit = density_unit * length_unit**3
+        else:
+            mass_unit = self.parameters.get('dMsolUnit', 1.0) * mass_sun_cgs
+            length_unit = self.parameters.get('dKpcUnit', 1.0) * cm_per_kpc
+            density_unit = mass_unit / length_unit**3
         time_unit = 1.0 / np.sqrt(G*density_unit)
         velocity_unit = length_unit / time_unit
         self.conversion_factors["velocity"] = velocity_unit
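
The .param handling added in this merge relies on the tipsy naming convention
for types: keys beginning with "n" or "i" are integers, "d" marks doubles, and
"b" marks boolean flags stored as 0/1.  A small Python sketch of that rule
("nSteps" is a made-up key used only for illustration; dKpcUnit and bComove
appear in the diff above):

    def parse_tipsy_value(param, val):
        # n*/i* -> integer, d* -> float, b* -> boolean stored as 0/1
        if param.startswith('n') or param.startswith('i'):
            return long(val)          # Python 2, matching the code above
        elif param.startswith('d'):
            return float(val)
        elif param.startswith('b'):
            return bool(float(val))
        return val

    parse_tipsy_value("dKpcUnit", "300.0")  # -> 300.0
    parse_tipsy_value("bComove", "1")       # -> True
    parse_tipsy_value("nSteps", "128")      # -> 128L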

Repository URL: https://bitbucket.org/yt_analysis/yt-3.0/
