[yt-svn] commit/yt-3.0: 2 new changesets

commits-noreply at bitbucket.org
Tue Jul 2 16:17:19 PDT 2013


2 new commits in yt-3.0:

https://bitbucket.org/yt_analysis/yt-3.0/commits/8ee62b09392c/
Changeset:   8ee62b09392c
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-30 15:27:58
Summary:     Fixing FLASH reader.
Affected #:  2 files

diff -r ab5d001a882986a751b6f6edffeb4fdeb0ee49fc -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -212,13 +212,6 @@
     def _setup_data_io(self):
         self.io = io_registry[self.data_style](self.parameter_file)
 
-    def _chunk_io(self, dobj):
-        gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
-        # We'll take the max of 128 and the number of processors
-        nl = max(16, ytcfg.getint("yt", "__topcomm_parallel_size"))
-        for gs in list_chunks(gobjs, nl):
-            yield YTDataChunk(dobj, "io", gs, self._count_selection)
-
 class FLASHStaticOutput(StaticOutput):
     _hierarchy_class = FLASHHierarchy
     _fieldinfo_fallback = FLASHFieldInfo

diff -r ab5d001a882986a751b6f6edffeb4fdeb0ee49fc -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -92,6 +92,6 @@
             ind = 0
             for chunk in chunks:
                 for g in chunk.objs:
-                    data = ds[g.id - g._id_offset,:,:,:].transpose()[mask]
+                    data = ds[g.id - g._id_offset,:,:,:].transpose()
                     ind += g.select(selector, data, rv[field], ind) # caches
         return rv
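
[Editor's note: the removed "[mask]" subscript was the bug here: g.select applies
the selector itself and expects the full grid-shaped array, so pre-masking
collapsed the data to 1D before selection. A minimal sketch of the pattern, with
a hypothetical select() standing in for the real grid method:

    import numpy as np

    def select(mask, source, dest, offset):
        # Copy the selected cells of a grid-shaped array into the flat
        # destination buffer; return how many values were copied.
        vals = source[mask]
        dest[offset:offset + vals.size] = vals
        return vals.size

    grid = np.arange(27, dtype="float64").reshape(3, 3, 3)
    mask = grid > 10.0                      # selector-derived mask
    out = np.empty(mask.sum(), dtype="float64")
    n = select(mask, grid, out, 0)          # source is grid-shaped: works
    # select(mask, grid[mask], out, 0)      # pre-masked 1D source: wrong shape
]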


https://bitbucket.org/yt_analysis/yt-3.0/commits/2d1f7f07cce0/
Changeset:   2d1f7f07cce0
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-02 22:05:27
Summary:     Merged
Affected #:  24 files

diff -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -286,7 +286,8 @@
         # This needs to be parallel_objects-ified
         for chunk in parallel_objects(self.data_source.chunks(
                 chunk_fields, "io")): 
-            mylog.debug("Adding chunk (%s) to tree", chunk.ires.size)
+            mylog.debug("Adding chunk (%s) to tree (%0.3e GB RAM)", chunk.ires.size,
+                get_memory_usage()/1024.)
             self._handle_chunk(chunk, fields, tree)
         # Note that this will briefly double RAM usage
         if self.proj_style == "mip":
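
[Editor's note: the get_memory_usage()/1024. term implies a helper that reports
resident memory in MB. A rough, Linux-only sketch of such a helper; yt's actual
implementation may differ:

    import os

    def get_memory_usage():
        # Resident set size in MB, read from /proc/self/statm (Linux).
        pagesize = os.sysconf("SC_PAGESIZE")
        with open("/proc/self/statm") as f:
            resident_pages = int(f.read().split()[1])
        return resident_pages * pagesize / (1024.0 * 1024.0)
]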

diff -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -219,11 +219,14 @@
         finfo = self.pf._get_field_info(*field)
         with self._field_type_state(ftype, finfo):
             if fname in self._container_fields:
-                return self._generate_container_field(field)
+                tr = self._generate_container_field(field)
             if finfo.particle_type:
-                return self._generate_particle_field(field)
+                tr = self._generate_particle_field(field)
             else:
-                return self._generate_fluid_field(field)
+                tr = self._generate_fluid_field(field)
+            if tr is None:
+                raise YTCouldNotGenerateField(field, self.pf)
+            return tr
 
     def _generate_fluid_field(self, field):
         # First we check the validator
@@ -246,7 +249,7 @@
         rv = np.empty(self.ires.size, dtype="float64")
         ind = 0
         if ngz == 0:
-            for io_chunk in self.chunks([], "io"):
+            for io_chunk in self.chunks([], "io", cache = False):
                 for i,chunk in enumerate(self.chunks(field, "spatial", ngz = 0)):
                     ind += self._current_chunk.objs[0].select(
                             self.selector, self[field], rv, ind)
@@ -279,8 +282,8 @@
             size = self._count_particles(ftype)
             rv = np.empty(size, dtype="float64")
             ind = 0
-            for io_chunk in self.chunks([], "io"):
-                for i,chunk in enumerate(self.chunks(field, "spatial")):
+            for io_chunk in self.chunks([], "io", cache = False):
+                for i, chunk in enumerate(self.chunks(field, "spatial")):
                     x, y, z = (self[ftype, 'particle_position_%s' % ax]
                                for ax in 'xyz')
                     if x.size == 0: continue
@@ -301,7 +304,7 @@
             if f1 == ftype:
                 return val.size
         size = 0
-        for io_chunk in self.chunks([], "io"):
+        for io_chunk in self.chunks([], "io", cache = False):
             for i,chunk in enumerate(self.chunks([], "spatial")):
                 x, y, z = (self[ftype, 'particle_position_%s' % ax]
                             for ax in 'xyz')
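
[Editor's note: the rewrite above replaces early returns with a tr accumulator so
that a generator returning None can be detected and reported via the
YTCouldNotGenerateField added later in this changeset. The pattern, distilled
into a sketch with hypothetical names:

    def generate_field(field, generators, pf):
        # Try each generator in turn; raise only if none produced data.
        tr = None
        for gen in generators:
            tr = gen(field)
            if tr is not None:
                break
        if tr is None:
            raise YTCouldNotGenerateField(field, pf)
        return tr
]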

diff -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -270,8 +270,12 @@
         return arr.reshape(self.ActiveDimensions, order="C")
 
     def __missing__(self, item):
-        if hasattr(self.pf, "field_info") and isinstance(item, tuple):
-            finfo = self.pf._get_field_info(*item)
+        if hasattr(self.pf, "field_info"):
+            if not isinstance(item, tuple):
+                field = ("unknown", item)
+            else:
+                field = item
+            finfo = self.pf._get_field_info(*field)
         else:
             FI = getattr(self.pf, "field_info", FieldInfo)
             if item in FI:
@@ -283,7 +287,7 @@
                 vv = finfo(self)
             except NeedsGridType as exc:
                 ngz = exc.ghost_zones
-                nfd = FieldDetector(self.nd + ngz * 2)
+                nfd = FieldDetector(self.nd + ngz * 2, pf = self.pf)
                 nfd._num_ghost_zones = ngz
                 vv = finfo(nfd)
                 if ngz > 0: vv = vv[ngz:-ngz, ngz:-ngz, ngz:-ngz]
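
[Editor's note: the __missing__ change normalizes bare field names to
("unknown", name) tuples before lookup, so both spellings hit the same code
path. A standalone sketch of that normalization, using a hypothetical class:

    class FieldCache(dict):
        def __missing__(self, item):
            # Promote bare names to (ftype, fname) tuples, then generate.
            field = item if isinstance(item, tuple) else ("unknown", item)
            value = "generated for (%s, %s)" % field
            self[item] = value          # cache under the original key
            return value

    fc = FieldCache()
    print(fc["Density"])                # generated for (unknown, Density)
    print(fc[("gas", "Density")])       # generated for (gas, Density)
]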

diff -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 yt/data_objects/particle_fields.py
--- a/yt/data_objects/particle_fields.py
+++ b/yt/data_objects/particle_fields.py
@@ -146,3 +146,4 @@
     registry.add_field((ptype, "Velocities"),
                        function=_get_vec_func(ptype, vel_names),
                        particle_type=True)
+

diff -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -60,6 +60,7 @@
     geometry = "cartesian"
     coordinates = None
     max_level = 99
+    storage_filename = None
 
     class __metaclass__(type):
         def __init__(cls, name, b, d):

diff -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 yt/data_objects/tests/test_derived_quantities.py
--- a/yt/data_objects/tests/test_derived_quantities.py
+++ b/yt/data_objects/tests/test_derived_quantities.py
@@ -50,3 +50,7 @@
         a_std = np.sqrt((ad["CellMass"] * (ad["Density"] - a_mean)**2).sum() / 
                         ad["CellMass"].sum())
         yield assert_rel_equal, my_std, a_std, 12
+
+if __name__ == "__main__":
+    for i in test_extrema():
+        i[0](*i[1:])

diff -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 yt/data_objects/tests/test_fields.py
--- a/yt/data_objects/tests/test_fields.py
+++ b/yt/data_objects/tests/test_fields.py
@@ -85,13 +85,17 @@
 
 def test_all_fields():
     for field in FieldInfo:
-        if field.startswith("CuttingPlane"): continue
-        if field.startswith("particle"): continue
-        if field.startswith("CIC"): continue
-        if field.startswith("WeakLensingConvergence"): continue
-        if field.startswith("DensityPerturbation"): continue
-        if field.startswith("Matter_Density"): continue
-        if field.startswith("Overdensity"): continue
+        if isinstance(field, types.TupleType):
+            fname = field[0]
+        else:
+            fname = field
+        if fname.startswith("CuttingPlane"): continue
+        if fname.startswith("particle"): continue
+        if fname.startswith("CIC"): continue
+        if fname.startswith("WeakLensingConvergence"): continue
+        if fname.startswith("DensityPerturbation"): continue
+        if fname.startswith("Matter_Density"): continue
+        if fname.startswith("Overdensity"): continue
         if FieldInfo[field].particle_type: continue
         for nproc in [1, 4, 8]:
             yield TestFieldAccess(field, nproc)
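
[Editor's note: types.TupleType is the Python 2 spelling of tuple and presumes an
"import types" at the top of the file; an equivalent check that also runs on
Python 3 is simply:

    if isinstance(field, tuple):
        fname = field[0]
    else:
        fname = field
]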

diff -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -1005,13 +1005,6 @@
 add_field("JeansMassMsun",function=_JeansMassMsun,
           units=r"\rm{M_{\odot}}")
 
-# We add these fields so that the field detector can use them
-for field in ["particle_position_%s" % ax for ax in "xyz"]:
-    # This marker should let everyone know not to use the fields, but NullFunc
-    # should do that, too.
-    add_field(("all", field), function=NullFunc, particle_type = True,
-        units=r"UNDEFINED")
-
 def _pdensity(field, data):
     pmass = data[('deposit','all_mass')]
     np.divide(pmass, data["CellVolume"], pmass)

diff -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -180,7 +180,7 @@
                 g = og
             yield YTDataChunk(dobj, "spatial", [g], None)
 
-    def _chunk_io(self, dobj):
+    def _chunk_io(self, dobj, cache = True):
         """
         Since subsets are calculated per domain,
         i.e. per file, yield each domain at a time to
@@ -189,7 +189,8 @@
         """
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         for subset in oobjs:
-            yield YTDataChunk(dobj, "io", [subset], None)
+            yield YTDataChunk(dobj, "io", [subset], None,
+                              cache = cache)
 
 
 class ARTStaticOutput(StaticOutput):

diff -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -232,11 +232,12 @@
     def _chunk_spatial(self, dobj, ngz):
         raise NotImplementedError
 
-    def _chunk_io(self, dobj):
+    def _chunk_io(self, dobj, cache = True):
         # _current_chunk is made from identify_base_chunk
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         for chunk in oobjs:
-            yield YTDataChunk(dobj, "io", [chunk], self._data_size)
+            yield YTDataChunk(dobj, "io", [chunk], self._data_size,
+                              cache = cache)
 
     def _read_fluid_fields(self, fields, dobj, chunk=None):
         if len(fields) == 0:

diff -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -388,14 +388,16 @@
     """
     particle_field = field.name[4:]
     pos = data[('all', 'Coordinates')]
+    # Get back into density
+    pden = data['all', 'particle_mass'] / data["CellVolume"] 
     top = data.deposit(
         pos,
-        [data[('all', particle_field)]*data[('all', 'particle_mass')]],
+        [data[('all', particle_field)]*pden],
         method = 'cic'
         )
     bottom = data.deposit(
         pos,
-        [data[('all', 'particle_mass')]],
+        [pden],
         method = 'cic'
         )
     top[bottom == 0] = 0.0
@@ -507,11 +509,28 @@
     add_enzo_field(("all", pf), function=NullFunc, convert_function=cfunc,
               particle_type=True)
 
-for pf in ["creation_time", "dynamical_time", "metallicity_fraction"]:
+for pf in ["creation_time", "dynamical_time", "metallicity_fraction"] \
+        + ["particle_position_%s" % ax for ax in 'xyz'] \
+        + ["particle_velocity_%s" % ax for ax in 'xyz']:
     add_enzo_field(pf, function=NullFunc,
               validators = [ValidateDataField(pf)],
               particle_type=True)
-add_field(('all', "particle_mass"), function=NullFunc, particle_type=True)
+
+def _convertParticleMass(data):
+    return data.convert("Density")*(data.convert("cm")**3.0)
+def _convertParticleMassMsun(data):
+    return data.convert("Density")*((data.convert("cm")**3.0)/mass_sun_cgs)
+# We have now multiplied by grid.dds.prod() inside the IO function.
+# So here we multiply just by the conversion to density.
+add_field(('all', "particle_mass"), function=NullFunc, 
+          particle_type=True, convert_function = _convertParticleMass)
+
+add_field("ParticleMass",
+          function=TranslationFunc("particle_mass"),
+          particle_type=True, convert_function=_convertParticleMass)
+add_field("ParticleMassMsun",
+          function=TranslationFunc("particle_mass"),
+          particle_type=True, convert_function=_convertParticleMassMsun)
 
 def _ParticleAge(field, data):
     current_time = data.pf.current_time
@@ -522,32 +541,6 @@
           validators=[ValidateDataField("creation_time")],
           particle_type=True, convert_function=_convertParticleAge)
 
-def _ParticleMass(field, data):
-    particles = data['all', "particle_mass"].astype('float64') * \
-                just_one(data["CellVolumeCode"].ravel())
-    # Note that we mandate grid-type here, so this is okay
-    return particles
-
-def _convertParticleMass(data):
-    return data.convert("Density")*(data.convert("cm")**3.0)
-def _IOLevelParticleMass(grid):
-    dd = dict(particle_mass = np.ones(1), CellVolumeCode=grid["CellVolumeCode"])
-    cf = (_ParticleMass(None, dd) * _convertParticleMass(grid))[0]
-    return cf
-def _convertParticleMassMsun(data):
-    return data.convert("Density")*((data.convert("cm")**3.0)/1.989e33)
-def _IOLevelParticleMassMsun(grid):
-    dd = dict(particle_mass = np.ones(1), CellVolumeCode=grid["CellVolumeCode"])
-    cf = (_ParticleMass(None, dd) * _convertParticleMassMsun(grid))[0]
-    return cf
-add_field("ParticleMass",
-          function=_ParticleMass, validators=[ValidateSpatial(0)],
-          particle_type=True, convert_function=_convertParticleMass,
-          particle_convert_function=_IOLevelParticleMass)
-add_field("ParticleMassMsun",
-          function=_ParticleMass, validators=[ValidateSpatial(0)],
-          particle_type=True, convert_function=_convertParticleMassMsun,
-          particle_convert_function=_IOLevelParticleMassMsun)
 
 #
 # Now we do overrides for 2D fields
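
[Editor's note: the net effect of this refactor, per the comments above: the
on-disk Enzo particle "mass" is really a density in code units, the IO layer now
folds in the cell volume, and the convert functions finish the trip to CGS.
Schematically, with purely illustrative numbers standing in for the real
conversion factors:

    import numpy as np

    dds = np.array([0.01, 0.01, 0.01])     # cell widths, code units (hypothetical)
    convert_density = 2.0e-27              # g/cm**3 per code density (hypothetical)
    convert_cm = 3.0e24                    # cm per code length (hypothetical)
    mass_sun_cgs = 1.989e33                # g

    code_density = 5.0                     # on-disk particle "mass" value
    code_mass = code_density * dds.prod()  # now applied in enzo/io.py
    mass_cgs = code_mass * convert_density * convert_cm**3
    mass_msun = mass_cgs / mass_sun_cgs
]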

diff -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -36,6 +36,8 @@
 import numpy as np
 from yt.funcs import *
 
+_convert_mass = ("particle_mass",)
+
 class IOHandlerPackedHDF5(BaseIOHandler):
 
     _data_style = "enzo_packed_3d"
@@ -81,6 +83,8 @@
                 for field in set(fields):
                     ftype, fname = field
                     gdata = data[g.id].pop(fname)[mask]
+                    if fname == "particle_mass":
+                        gdata *= g.dds.prod()
                     rv[field][ind:ind+gdata.size] = gdata
                 ind += gdata.size
                 data.pop(g.id)
@@ -130,6 +134,8 @@
                 for field in set(fields):
                     ftype, fname = field
                     gdata = data[g.id].pop(fname)[mask]
+                    if fname == "particle_mass":
+                        gdata *= g.dds.prod()
                     rv[field][ind:ind+gdata.size] = gdata
                 ind += gdata.size
         return rv

diff -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -74,6 +74,9 @@
     _hydro_offset = None
     _level_count = None
 
+    def __repr__(self):
+        return "RAMSESDomainFile: %i" % self.domain_id
+
     @property
     def level_count(self):
         if self._level_count is not None: return self._level_count
@@ -345,10 +348,10 @@
                 g = og
             yield YTDataChunk(dobj, "spatial", [g], None)
 
-    def _chunk_io(self, dobj):
+    def _chunk_io(self, dobj, cache = True):
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         for subset in oobjs:
-            yield YTDataChunk(dobj, "io", [subset], None)
+            yield YTDataChunk(dobj, "io", [subset], None, cache = cache)
 
 class RAMSESStaticOutput(StaticOutput):
     _hierarchy_class = RAMSESGeometryHandler
@@ -398,6 +401,8 @@
         for unit in mpc_conversion.keys():
             self.units[unit] = unit_l * mpc_conversion[unit] / mpc_conversion["cm"]
             self.units['%sh' % unit] = self.units[unit] * self.hubble_constant
+            self.units['%scm' % unit] = (self.units[unit] /
+                                          (1 + self.current_redshift))
             self.units['%shcm' % unit] = (self.units['%sh' % unit] /
                                           (1 + self.current_redshift))
         for unit in sec_conversion.keys():

diff -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -32,11 +32,12 @@
     BaseIOHandler
 
 from yt.utilities.fortran_utils import read_record
-from yt.utilities.lib.geometry_utils import get_morton_indices, \
-    get_morton_indices_unravel
+from yt.utilities.lib.geometry_utils import compute_morton
 
 from yt.geometry.oct_container import _ORDER_MAX
 
+CHUNKSIZE = 10000000
+
 _vector_fields = ("Coordinates", "Velocity", "Velocities")
 
 class IOHandlerOWLS(BaseIOHandler):
@@ -104,16 +105,18 @@
         f = h5py.File(data_file.filename, "r")
         pcount = f["/Header"].attrs["NumPart_ThisFile"][:].sum()
         morton = np.empty(pcount, dtype='uint64')
-        DLE = data_file.pf.domain_left_edge
-        DRE = data_file.pf.domain_right_edge
-        dx = (DRE - DLE) / 2**_ORDER_MAX
         ind = 0
         for key in f.keys():
             if not key.startswith("PartType"): continue
-            pos = f[key]["Coordinates"][:].astype("float64")
+            ds = f[key]["Coordinates"]
+            dt = ds.dtype.newbyteorder("N") # Native
+            pos = np.empty(ds.shape, dtype=dt)
+            pos[:] = ds
             regions.add_data_file(pos, data_file.file_id)
-            pos = np.floor((pos - DLE)/dx).astype("uint64")
-            morton[ind:ind+pos.shape[0]] = get_morton_indices(pos)
+            morton[ind:ind+pos.shape[0]] = compute_morton(
+                pos[:,0], pos[:,1], pos[:,2],
+                data_file.pf.domain_left_edge,
+                data_file.pf.domain_right_edge)
             ind += pos.shape[0]
         f.close()
         return morton
@@ -257,7 +260,6 @@
 
     def _initialize_index(self, data_file, regions):
         count = sum(data_file.total_particles.values())
-        dt = [("px", "float32"), ("py", "float32"), ("pz", "float32")]
         DLE = data_file.pf.domain_left_edge
         DRE = data_file.pf.domain_right_edge
         dx = (DRE - DLE) / 2**_ORDER_MAX
@@ -266,16 +268,10 @@
             # We add on an additional 4 for the first record.
             f.seek(data_file._position_offset + 4)
             # The first total_particles * 3 values are positions
-            pp = np.fromfile(f, dtype = dt, count = count)
-        pos = np.column_stack([pp['px'], pp['py'], pp['pz']]).astype("float64")
-        del pp
-        regions.add_data_file(pos, data_file.file_id)
-        lx = np.floor((pos[:,0] - DLE[0])/dx[0]).astype("uint64")
-        ly = np.floor((pos[:,1] - DLE[1])/dx[1]).astype("uint64")
-        lz = np.floor((pos[:,2] - DLE[2])/dx[2]).astype("uint64")
-        del pos
-        morton = get_morton_indices_unravel(lx, ly, lz)
-        del lx, ly, lz
+            pp = np.fromfile(f, dtype = 'float32', count = count*3)
+            pp.shape = (count, 3)
+        regions.add_data_file(pp, data_file.file_id)
+        morton = compute_morton(pp[:,0], pp[:,1], pp[:,2], DLE, DRE)
         return morton
 
     def _count_particles(self, data_file):
@@ -431,34 +427,33 @@
                 # We'll just add the individual types separately
                 count = data_file.total_particles[ptype]
                 if count == 0: continue
-                pp = np.fromfile(f, dtype = self._pdtypes[ptype],
-                                 count = count)
-                mis = np.empty(3, dtype="float64")
-                mas = np.empty(3, dtype="float64")
-                for axi, ax in enumerate('xyz'):
-                    mi = pp["Coordinates"][ax].min()
-                    ma = pp["Coordinates"][ax].max()
-                    mylog.debug("Spanning: %0.3e .. %0.3e in %s", mi, ma, ax)
-                    mis[axi] = mi
-                    mas[axi] = ma
-                if np.any(mis < pf.domain_left_edge) or \
-                   np.any(mas > pf.domain_right_edge):
-                    raise YTDomainOverflow(mis, mas,
-                                           pf.domain_left_edge,
-                                           pf.domain_right_edge)
-                fpos = np.empty((count, 3), dtype="float64")
-                fpos[:,0] = pp["Coordinates"]["x"]
-                fpos[:,1] = pp["Coordinates"]["y"]
-                fpos[:,2] = pp["Coordinates"]["z"]
-                regions.add_data_file(fpos, data_file.file_id)
-                del fpos
-                pos = np.empty((count, 3), dtype="uint64")
-                for axi, ax in enumerate("xyz"):
-                    coords = pp['Coordinates'][ax].astype("float64")
-                    coords = np.floor((coords - DLE[axi])/dx[axi])
-                    pos[:,axi] = coords
-                morton[ind:ind+count] = get_morton_indices(pos)
-                del pp, pos
+                start, stop = ind, ind + count
+                while ind < stop:
+                    c = min(CHUNKSIZE, stop - ind)
+                    pp = np.fromfile(f, dtype = self._pdtypes[ptype],
+                                     count = c)
+                    mis = np.empty(3, dtype="float64")
+                    mas = np.empty(3, dtype="float64")
+                    for axi, ax in enumerate('xyz'):
+                        mi = pp["Coordinates"][ax].min()
+                        ma = pp["Coordinates"][ax].max()
+                        mylog.debug("Spanning: %0.3e .. %0.3e in %s", mi, ma, ax)
+                        mis[axi] = mi
+                        mas[axi] = ma
+                    if np.any(mis < pf.domain_left_edge) or \
+                       np.any(mas > pf.domain_right_edge):
+                        raise YTDomainOverflow(mis, mas,
+                                               pf.domain_left_edge,
+                                               pf.domain_right_edge)
+                    pos = np.empty((pp.size, 3), dtype="float64")
+                    pos[:,0] = pp["Coordinates"]["x"]
+                    pos[:,1] = pp["Coordinates"]["y"]
+                    pos[:,2] = pp["Coordinates"]["z"]
+                    regions.add_data_file(pos, data_file.file_id)
+                    morton[ind:ind+c] = compute_morton(
+                        pos[:,0], pos[:,1], pos[:,2],
+                        DLE, DRE)
+                    ind += c
         mylog.info("Adding %0.3e particles", morton.size)
         return morton
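
[Editor's note: the loop above bounds peak memory by reading at most CHUNKSIZE
particles per np.fromfile call instead of the whole file at once. The bare
pattern, assuming only a flat float32 array of xyz triples rather than the real
Tipsy record layout:

    import numpy as np

    CHUNKSIZE = 10000000

    def iter_positions(f, count):
        # Yield (n, 3) position blocks, never more than CHUNKSIZE rows.
        read = 0
        while read < count:
            c = min(CHUNKSIZE, count - read)
            pp = np.fromfile(f, dtype="float32", count=c * 3)
            pp.shape = (c, 3)
            yield pp
            read += c
]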
 

diff -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 yt/frontends/stream/fields.py
--- a/yt/frontends/stream/fields.py
+++ b/yt/frontends/stream/fields.py
@@ -42,6 +42,10 @@
 add_field = StreamFieldInfo.add_field
 
 add_stream_field("density", function = NullFunc)
+add_stream_field("x-velocity", function = NullFunc)
+add_stream_field("y-velocity", function = NullFunc)
+add_stream_field("z-velocity", function = NullFunc)
+
 add_field("Density", function = TranslationFunc("density"))
 
 add_stream_field("particle_position_x", function = NullFunc, particle_type=True)

diff -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -31,6 +31,7 @@
 from types import ClassType
 import numpy as np
 import abc
+import copy
 
 from yt.funcs import *
 from yt.config import ytcfg
@@ -166,8 +167,7 @@
         # First we construct our list of fields to check
         fields_to_check = []
         fields_to_allcheck = []
-        fields_to_add = []
-        for field in fi:
+        for field in fi.keys():
             finfo = fi[field]
             # Explicitly defined
             if isinstance(field, tuple):
@@ -179,13 +179,14 @@
                 fields_to_check.append(field)
                 continue
             # We do a special case for 'all' later
-            new_fields = [(pt, field) for pt in
-                          self.parameter_file.particle_types]
+            new_fields = []
+            for pt in self.parameter_file.particle_types:
+                new_fi = copy.copy(finfo)
+                new_fi.name = (pt, new_fi.name)
+                fi[new_fi.name] = new_fi
+                new_fields.append(new_fi.name)
             fields_to_check += new_fields
-            fields_to_add.extend( (new_field, fi[field]) for
-                                   new_field in new_fields )
             fields_to_allcheck.append(field)
-        fi.update(fields_to_add)
         for field in fields_to_check:
             try:
                 fd = fi[field].get_dependencies(pf = self.parameter_file)
@@ -402,8 +403,9 @@
         if len(fields_to_read) == 0:
             return {}, fields_to_generate
         fields_to_return = self.io._read_particle_selection(
-                    self._chunk_io(dobj), selector,
-                    fields_to_read)
+            self._chunk_io(dobj, cache = False),
+            selector,
+            fields_to_read)
         for field in fields_to_read:
             ftype, fname = field
             finfo = self.pf._get_field_info(*field)
@@ -424,10 +426,11 @@
         fields_to_read, fields_to_generate = self._split_fields(fields)
         if len(fields_to_read) == 0:
             return {}, fields_to_generate
-        fields_to_return = self.io._read_fluid_selection(self._chunk_io(dobj),
-                                                   selector,
-                                                   fields_to_read,
-                                                   chunk_size)
+        fields_to_return = self.io._read_fluid_selection(
+            self._chunk_io(dobj, cache = False),
+            selector,
+            fields_to_read,
+            chunk_size)
         for field in fields_to_read:
             ftype, fname = field
             conv_factor = self.pf.field_info[fname]._convert_function(self)
@@ -452,20 +455,30 @@
         else:
             raise NotImplementedError
 
+def cached_property(func):
+    n = '_%s' % func.func_name
+    def cached_func(self):
+        if self._cache and getattr(self, n, None) is not None:
+            return getattr(self, n)
+        if self.data_size is None:
+            tr = self._accumulate_values(n[1:])
+        else:
+            tr = func(self)
+        if self._cache:
+            setattr(self, n, tr)
+        return tr
+    return property(cached_func)
+
 class YTDataChunk(object):
 
-    def __init__(self, dobj, chunk_type, objs, data_size = None, field_type = None):
+    def __init__(self, dobj, chunk_type, objs, data_size = None,
+                 field_type = None, cache = False):
         self.dobj = dobj
         self.chunk_type = chunk_type
         self.objs = objs
-        self._data_size = data_size
+        self.data_size = data_size
         self._field_type = field_type
-
-    @property
-    def data_size(self):
-        if callable(self._data_size):
-            self._data_size = self._data_size(self.dobj, self.objs)
-        return self._data_size
+        self._cache = cache
 
     def _accumulate_values(self, method):
         # We call this generically.  It's somewhat slower, since we're doing
@@ -476,35 +489,25 @@
             f = getattr(obj, mname)
             arrs.append(f(self.dobj))
         arrs = np.concatenate(arrs)
-        self._data_size = arrs.shape[0]
+        self.data_size = arrs.shape[0]
         return arrs
 
-    _fcoords = None
-    @property
+    @cached_property
     def fcoords(self):
-        if self.data_size is None:
-            self._fcoords = self._accumulate_values("fcoords")
-        if self._fcoords is not None: return self._fcoords
         ci = np.empty((self.data_size, 3), dtype='float64')
-        self._fcoords = ci
-        if self.data_size == 0: return self._fcoords
+        if self.data_size == 0: return ci
         ind = 0
         for obj in self.objs:
             c = obj.select_fcoords(self.dobj)
             if c.shape[0] == 0: continue
             ci[ind:ind+c.shape[0], :] = c
             ind += c.shape[0]
-        return self._fcoords
+        return ci
 
-    _icoords = None
-    @property
+    @cached_property
     def icoords(self):
-        if self.data_size is None:
-            self._icoords = self._accumulate_values("icoords")
-        if self._icoords is not None: return self._icoords
         ci = np.empty((self.data_size, 3), dtype='int64')
-        self._icoords = ci
-        if self.data_size == 0: return self._icoords
+        if self.data_size == 0: return ci
         ind = 0
         for obj in self.objs:
             c = obj.select_icoords(self.dobj)
@@ -513,15 +516,10 @@
             ind += c.shape[0]
         return ci
 
-    _fwidth = None
-    @property
+    @cached_property
     def fwidth(self):
-        if self.data_size is None:
-            self._fwidth = self._accumulate_values("fwidth")
-        if self._fwidth is not None: return self._fwidth
         ci = np.empty((self.data_size, 3), dtype='float64')
-        self._fwidth = ci
-        if self.data_size == 0: return self._fwidth
+        if self.data_size == 0: return ci
         ind = 0
         for obj in self.objs:
             c = obj.select_fwidth(self.dobj)
@@ -530,15 +528,10 @@
             ind += c.shape[0]
         return ci
 
-    _ires = None
-    @property
+    @cached_property
     def ires(self):
-        if self.data_size is None:
-            self._ires = self._accumulate_values("ires")
-        if self._ires is not None: return self._ires
         ci = np.empty(self.data_size, dtype='int64')
-        self._ires = ci
-        if self.data_size == 0: return self._ires
+        if self.data_size == 0: return ci
         ind = 0
         for obj in self.objs:
             c = obj.select_ires(self.dobj)
@@ -547,22 +540,17 @@
             ind += c.size
         return ci
 
-    _tcoords = None
-    @property
+    @cached_property
     def tcoords(self):
-        if self._tcoords is None:
-            self.dtcoords
+        self.dtcoords
         return self._tcoords
 
-    _dtcoords = None
-    @property
+    @cached_property
     def dtcoords(self):
-        if self._dtcoords is not None: return self._dtcoords
         ct = np.empty(self.data_size, dtype='float64')
         cdt = np.empty(self.data_size, dtype='float64')
-        self._tcoords = ct
-        self._dtcoords = cdt
-        if self.data_size == 0: return self._dtcoords
+        self._tcoords = ct # Set this for tcoords
+        if self.data_size == 0: return cdt
         ind = 0
         for obj in self.objs:
             gdt, gt = obj.tcoords(self.dobj)
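
[Editor's note: the cached_property decorator above memoizes per-chunk arrays
only when the chunk was built with cache=True, which is why the io chunks
throughout this changeset pass cache=False. A trimmed, standalone version
showing just the caching behavior, with the data_size/_accumulate_values branch
omitted:

    def cached_property(func):
        n = '_%s' % func.func_name          # func.__name__ on Python 3
        def cached_func(self):
            if self._cache and getattr(self, n, None) is not None:
                return getattr(self, n)
            tr = func(self)
            if self._cache:
                setattr(self, n, tr)
            return tr
        return property(cached_func)

    class Chunk(object):
        def __init__(self, cache=True):
            self._cache = cache

        @cached_property
        def fcoords(self):
            print("computing fcoords")
            return [0.0, 0.5, 1.0]

    c = Chunk(cache=True)
    c.fcoords; c.fcoords        # "computing fcoords" printed once
    Chunk(cache=False).fcoords  # recomputed on every access
]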

diff -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 yt/geometry/grid_geometry_handler.py
--- a/yt/geometry/grid_geometry_handler.py
+++ b/yt/geometry/grid_geometry_handler.py
@@ -245,16 +245,16 @@
             dobj.size = self._count_selection(dobj)
         if getattr(dobj, "shape", None) is None:
             dobj.shape = (dobj.size,)
-        dobj._current_chunk = list(self._chunk_all(dobj))[0]
+        dobj._current_chunk = list(self._chunk_all(dobj, cache = False))[0]
 
     def _count_selection(self, dobj, grids = None):
         if grids is None: grids = dobj._chunk_info
         count = sum((g.count(dobj.selector) for g in grids))
         return count
 
-    def _chunk_all(self, dobj):
+    def _chunk_all(self, dobj, cache = True):
         gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
-        yield YTDataChunk(dobj, "all", gobjs, dobj.size)
+        yield YTDataChunk(dobj, "all", gobjs, dobj.size, cache)
         
     def _chunk_spatial(self, dobj, ngz, sort = None):
         gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
@@ -271,13 +271,16 @@
                 g = og
             size = self._count_selection(dobj, [og])
             if size == 0: continue
-            yield YTDataChunk(dobj, "spatial", [g], size)
+            # We don't want to cache any of the masks or icoords or fcoords for
+            # individual grids.
+            yield YTDataChunk(dobj, "spatial", [g], size, cache = False)
 
-    def _chunk_io(self, dobj):
+    def _chunk_io(self, dobj, cache = True):
         gfiles = defaultdict(list)
         gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         for g in gobjs:
             gfiles[g.filename].append(g)
         for fn in sorted(gfiles):
             gs = gfiles[fn]
-            yield YTDataChunk(dobj, "io", gs, self._count_selection(dobj, gs))
+            yield YTDataChunk(dobj, "io", gs, self._count_selection(dobj, gs),
+                              cache = cache)

diff -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -170,10 +170,10 @@
                 g = og
             yield YTDataChunk(dobj, "spatial", [g])
 
-    def _chunk_io(self, dobj):
+    def _chunk_io(self, dobj, cache = True):
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         for subset in oobjs:
-            yield YTDataChunk(dobj, "io", [subset], None)
+            yield YTDataChunk(dobj, "io", [subset], None, cache = cache)
 
 class ParticleDataChunk(YTDataChunk):
     def __init__(self, oct_handler, regions, *args, **kwargs):

diff -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 yt/geometry/particle_oct_container.pyx
--- a/yt/geometry/particle_oct_container.pyx
+++ b/yt/geometry/particle_oct_container.pyx
@@ -261,6 +261,10 @@
                         self.visit(o.children[cind(i,j,k)], counts, level + 1)
         return
 
+ctypedef fused anyfloat:
+    np.float32_t
+    np.float64_t
+
 cdef class ParticleRegions:
     cdef np.float64_t left_edge[3]
     cdef np.float64_t dds[3]
@@ -282,7 +286,14 @@
         for i in range(nfiles/64 + 1):
             self.masks.append(np.zeros(dims, dtype="uint64"))
 
-    def add_data_file(self, np.ndarray[np.float64_t, ndim=2] pos, int file_id):
+    def add_data_file(self, np.ndarray pos, int file_id):
+        if pos.dtype == np.float32:
+            self._mask_positions[np.float32_t](pos, file_id)
+        elif pos.dtype == np.float64:
+            self._mask_positions[np.float64_t](pos, file_id)
+
+    cdef void _mask_positions(self, np.ndarray[anyfloat, ndim=2] pos,
+                              int file_id):
         cdef np.int64_t no = pos.shape[0]
         cdef np.int64_t p
         cdef int ind[3], i

diff -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -1124,6 +1124,8 @@
 
     def __init__(self, dobj):
         self.base_selector = dobj.base_selector
+        self.min_level = self.base_selector.min_level
+        self.max_level = self.base_selector.max_level
         self.domain_id = dobj.domain_id
         self.overlap_cells = 1
 
@@ -1175,6 +1177,8 @@
         self.min_ind = dobj.min_ind
         self.max_ind = dobj.max_ind
         self.base_selector = dobj.base_selector
+        self.min_level = self.base_selector.min_level
+        self.max_level = self.base_selector.max_level
 
     @cython.boundscheck(False)
     @cython.wraparound(False)

diff -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -77,6 +77,10 @@
     def __str__(self):
         return "Could not find field '%s' in %s." % (self.fname, self.pf)
 
+class YTCouldNotGenerateField(YTFieldNotFound):
+    def __str__(self):
+        return "Could field '%s' in %s could not be generated." % (self.fname, self.pf)
+
 class YTFieldTypeNotFound(YTException):
     def __init__(self, fname):
         self.fname = fname

diff -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 yt/utilities/lib/QuadTree.pyx
--- a/yt/utilities/lib/QuadTree.pyx
+++ b/yt/utilities/lib/QuadTree.pyx
@@ -131,7 +131,7 @@
         cdef int i, j
         cdef QuadTreeNode *node
         cdef np.int64_t pos[2]
-        cdef np.float64_t *vals = <np.float64_t *> alloca(
+        cdef np.float64_t *vals = <np.float64_t *> malloc(
                 sizeof(np.float64_t)*nvals)
         cdef np.float64_t weight_val = 0.0
         self.nvals = nvals
@@ -160,6 +160,7 @@
                 self.root_nodes[i][j] = QTN_initialize(
                     pos, nvals, vals, weight_val)
         self.num_cells = self.top_grid_dims[0] * self.top_grid_dims[1]
+        free(vals)
 
     cdef int count_total_cells(self, QuadTreeNode *root):
         cdef int total = 0
@@ -373,7 +374,7 @@
         cdef np.float64_t *vdata = <np.float64_t *> nvals.data
         cdef np.float64_t *wdata = <np.float64_t *> nwvals.data
         cdef np.float64_t wtoadd
-        cdef np.float64_t *vtoadd = <np.float64_t *> alloca(
+        cdef np.float64_t *vtoadd = <np.float64_t *> malloc(
                 sizeof(np.float64_t)*self.nvals)
         for i in range(self.top_grid_dims[0]):
             for j in range(self.top_grid_dims[1]):
@@ -381,6 +382,7 @@
                 wtoadd = 0.0
                 curpos += self.fill(self.root_nodes[i][j],
                     curpos, px, py, pdx, pdy, vdata, wdata, vtoadd, wtoadd, 0)
+        free(vtoadd)
         return opx, opy, opdx, opdy, nvals, nwvals
 
     cdef int count(self, QuadTreeNode *node):
@@ -406,7 +408,7 @@
                         np.int64_t level):
         cdef int i, j, n
         cdef np.float64_t *vorig
-        vorig = <np.float64_t *> alloca(sizeof(np.float64_t) * self.nvals)
+        vorig = <np.float64_t *> malloc(sizeof(np.float64_t) * self.nvals)
         if node.children[0][0] == NULL:
             if self.merged == -1:
                 for i in range(self.nvals):
@@ -444,6 +446,7 @@
             for i in range(self.nvals):
                 vtoadd[i] = vorig[i]
             wtoadd -= node.weight_val
+        free(vorig)
         return added
 
     @cython.boundscheck(False)

diff -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 yt/utilities/lib/geometry_utils.pyx
--- a/yt/utilities/lib/geometry_utils.pyx
+++ b/yt/utilities/lib/geometry_utils.pyx
@@ -353,6 +353,51 @@
         morton_indices[i] = mi
     return morton_indices
 
+ctypedef fused anyfloat:
+    np.float32_t
+    np.float64_t
+
+cdef position_to_morton(np.ndarray[anyfloat, ndim=1] pos_x,
+                        np.ndarray[anyfloat, ndim=1] pos_y,
+                        np.ndarray[anyfloat, ndim=1] pos_z,
+                        np.float64_t dds[3], np.float64_t DLE[3],
+                        np.ndarray[np.uint64_t, ndim=1] ind):
+    cdef np.uint64_t mi, ii[3]
+    cdef np.float64_t p[3]
+    cdef np.int64_t i, j
+    for i in range(pos_x.shape[0]):
+        p[0] = <np.float64_t> pos_x[i]
+        p[1] = <np.float64_t> pos_y[i]
+        p[2] = <np.float64_t> pos_z[i]
+        for j in range(3):
+            ii[j] = <np.uint64_t> ((p[j] - DLE[j])/dds[j])
+        mi = 0
+        mi |= spread_bits(ii[2])<<0
+        mi |= spread_bits(ii[1])<<1
+        mi |= spread_bits(ii[0])<<2
+        ind[i] = mi
+
+DEF ORDER_MAX=20
+        
+def compute_morton(np.ndarray pos_x, np.ndarray pos_y, np.ndarray pos_z,
+                   domain_left_edge, domain_right_edge):
+    cdef int i
+    cdef np.float64_t dds[3], DLE[3], DRE[3]
+    for i in range(3):
+        DLE[i] = domain_left_edge[i]
+        DRE[i] = domain_right_edge[i]
+        dds[i] = (DRE[i] - DLE[i]) / (1 << ORDER_MAX)
+    cdef np.ndarray[np.uint64_t, ndim=1] ind
+    ind = np.zeros(pos_x.shape[0], dtype="uint64")
+    if pos_x.dtype == np.float32:
+        position_to_morton[np.float32_t](pos_x, pos_y, pos_z, dds, DLE, ind)
+    elif pos_x.dtype == np.float64:
+        position_to_morton[np.float64_t](pos_x, pos_y, pos_z, dds, DLE, ind)
+    else:
+        print "Could not identify dtype.", pos_x.dtype
+        raise NotImplementedError
+    return ind
+
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
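
[Editor's note: for reference, a plain-Python sketch of the Z-order key
compute_morton builds. spread_bits itself is defined elsewhere in
geometry_utils.pyx; this is an assumed-equivalent version. Each axis index gets
its bits spread to every third position, and the three results are OR'd together
with z in bit 0, y in bit 1, x in bit 2, matching the shifts above:

    ORDER_MAX = 20

    def spread_bits(x):
        # Place bit i of x at bit 3*i of the result.
        r = 0
        for i in range(ORDER_MAX):
            r |= ((x >> i) & 1) << (3 * i)
        return r

    def morton_index(ix, iy, iz):
        return spread_bits(iz) | (spread_bits(iy) << 1) | (spread_bits(ix) << 2)

    # morton_index(1, 0, 0) == 4; morton_index(1, 1, 1) == 7
]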

Repository URL: https://bitbucket.org/yt_analysis/yt-3.0/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


