[yt-svn] commit/yt: 9 new changesets

commits-noreply at bitbucket.org
Fri Mar 28 12:20:05 PDT 2014


9 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/558114958e96/
Changeset:   558114958e96
Branch:      yt-3.0
User:        samskillman
Date:        2014-03-26 21:26:29
Summary:     Fixing a bad regex, index_information -> hierarchy_information.  Also adding support for the case where MassUnits is not specified (as happens in InMemory datasets).
Affected #:  2 files

diff -r f488edc6c4b09e7055e82cc7f99e6240e259bf84 -r 558114958e969919490ae83cf51f4a1ea1294ae3 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -532,7 +532,7 @@
         self.dataset_type = dataset_type
         self.float_type = 'float64'
         self.parameter_file = weakref.proxy(pf) # for _obtain_enzo
-        self.float_type = self.enzo.index_information["GridLeftEdge"].dtype
+        self.float_type = self.enzo.hierarchy_information["GridLeftEdge"].dtype
         self.directory = os.getcwd()
         GridIndex.__init__(self, pf, dataset_type)
 
@@ -540,12 +540,12 @@
         pass
 
     def _count_grids(self):
-        self.num_grids = self.enzo.index_information["GridDimensions"].shape[0]
+        self.num_grids = self.enzo.hierarchy_information["GridDimensions"].shape[0]
 
     def _parse_index(self):
         self._copy_index_structure()
         mylog.debug("Copying reverse tree")
-        reverse_tree = self.enzo.index_information["GridParentIDs"].ravel().tolist()
+        reverse_tree = self.enzo.hierarchy_information["GridParentIDs"].ravel().tolist()
         # Initial setup:
         mylog.debug("Reconstructing parent-child relationships")
         grids = []
@@ -574,14 +574,14 @@
 
     def _copy_index_structure(self):
         # Dimensions are important!
-        self.grid_dimensions[:] = self.enzo.index_information["GridEndIndices"][:]
-        self.grid_dimensions -= self.enzo.index_information["GridStartIndices"][:]
+        self.grid_dimensions[:] = self.enzo.hierarchy_information["GridEndIndices"][:]
+        self.grid_dimensions -= self.enzo.hierarchy_information["GridStartIndices"][:]
         self.grid_dimensions += 1
-        self.grid_left_edge[:] = self.enzo.index_information["GridLeftEdge"][:]
-        self.grid_right_edge[:] = self.enzo.index_information["GridRightEdge"][:]
-        self.grid_levels[:] = self.enzo.index_information["GridLevels"][:]
-        self.grid_procs = self.enzo.index_information["GridProcs"].copy()
-        self.grid_particle_count[:] = self.enzo.index_information["GridNumberOfParticles"][:]
+        self.grid_left_edge[:] = self.enzo.hierarchy_information["GridLeftEdge"][:]
+        self.grid_right_edge[:] = self.enzo.hierarchy_information["GridRightEdge"][:]
+        self.grid_levels[:] = self.enzo.hierarchy_information["GridLevels"][:]
+        self.grid_procs = self.enzo.hierarchy_information["GridProcs"].copy()
+        self.grid_particle_count[:] = self.enzo.hierarchy_information["GridNumberOfParticles"][:]
 
     def save_data(self, *args, **kwargs):
         pass
@@ -829,13 +829,16 @@
         else:
             if "LengthUnits" in self.parameters:
                 length_unit = self.parameters["LengthUnits"]
-                mass_unit = self.parameters["MassUnits"]
                 time_unit = self.parameters["TimeUnits"]
             else:
                 mylog.warning("Setting 1.0 in code units to be 1.0 cm")
                 mylog.warning("Setting 1.0 in code units to be 1.0 s")
+                length_unit = mass_unit = time_unit = 1.0
+            if "MassUnits" in self.parameters:
+                mass_unit = self.parameters["MassUnits"]
+            else:
                 mylog.warning("Setting 1.0 in code units to be 1.0 g")
-                length_unit = mass_unit = time_unit = 1.0
+                mass_unit = 1.0
 
             self.length_unit = self.quan(length_unit, "cm")
             self.mass_unit = self.quan(mass_unit, "g")
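
The unit handling introduced above boils down to a fallback pattern: use the
LengthUnits/TimeUnits parameters when present, and treat MassUnits separately
so datasets that omit it (such as InMemory runs) still get a default. A
minimal stand-alone sketch of that pattern (not yt code; `parameters` and the
logger are stand-ins):

    import logging

    log = logging.getLogger("units-sketch")

    def pick_cgs_units(parameters):
        # LengthUnits/TimeUnits fall back together to 1.0 cm / 1.0 s.
        if "LengthUnits" in parameters:
            length_unit = parameters["LengthUnits"]
            time_unit = parameters["TimeUnits"]
        else:
            log.warning("Setting 1.0 in code units to be 1.0 cm")
            log.warning("Setting 1.0 in code units to be 1.0 s")
            length_unit = time_unit = 1.0
        # MassUnits is checked on its own, so its absence no longer
        # breaks datasets that define the other units.
        if "MassUnits" in parameters:
            mass_unit = parameters["MassUnits"]
        else:
            log.warning("Setting 1.0 in code units to be 1.0 g")
            mass_unit = 1.0
        return length_unit, mass_unit, time_unit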

diff -r f488edc6c4b09e7055e82cc7f99e6240e259bf84 -r 558114958e969919490ae83cf51f4a1ea1294ae3 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -246,7 +246,7 @@
         coef1 = max((grid.Time - t1)/(grid.Time - t2), 0.0)
         coef2 = 1.0 - coef1
         t1 = enzo.yt_parameter_file["InitialTime"]
-        t2 = enzo.index_information["GridOldTimes"][grid.id]
+        t2 = enzo.hierarchy_information["GridOldTimes"][grid.id]
         return (coef1*self.grids_in_memory[grid.id][field] + \
                 coef2*self.old_grids_in_memory[grid.id][field])\
                 [self.my_slice]


https://bitbucket.org/yt_analysis/yt/commits/4fac877ce1f7/
Changeset:   4fac877ce1f7
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-03-27 19:15:03
Summary:     Making sure to add field type enzo to field list entries for in memory datasets.
Affected #:  2 files

diff -r 558114958e969919490ae83cf51f4a1ea1294ae3 -r 4fac877ce1f740fdac58f6b7aaf33e41809d6543 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -829,16 +829,12 @@
         else:
             if "LengthUnits" in self.parameters:
                 length_unit = self.parameters["LengthUnits"]
+                mass_unit = self.parameters["DensityUnits"] * length_unit**3
                 time_unit = self.parameters["TimeUnits"]
             else:
                 mylog.warning("Setting 1.0 in code units to be 1.0 cm")
                 mylog.warning("Setting 1.0 in code units to be 1.0 s")
                 length_unit = mass_unit = time_unit = 1.0
-            if "MassUnits" in self.parameters:
-                mass_unit = self.parameters["MassUnits"]
-            else:
-                mylog.warning("Setting 1.0 in code units to be 1.0 g")
-                mass_unit = 1.0
 
             self.length_unit = self.quan(length_unit, "cm")
             self.mass_unit = self.quan(mass_unit, "g")
@@ -902,6 +898,7 @@
         return obj
 
     def __init__(self, parameter_override=None, conversion_override=None):
+        self.fluid_types += ("enzo",)
         if parameter_override is None: parameter_override = {}
         self._parameter_override = parameter_override
         if conversion_override is None: conversion_override = {}

diff -r 558114958e969919490ae83cf51f4a1ea1294ae3 -r 4fac877ce1f740fdac58f6b7aaf33e41809d6543 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -255,7 +255,7 @@
         return field.swapaxes(0,2)
 
     def _read_field_names(self, grid):
-        return self.grids_in_memory[grid.id].keys()
+        return [("enzo", field) for field in self.grids_in_memory[grid.id].keys()]
 
     def _read_data_slice(self, grid, field, axis, coord):
         sl = [slice(3,-3), slice(3,-3), slice(3,-3)]
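
In yt-3.0 fields are addressed as (field_type, field_name) tuples, so the
_read_field_names change above tags each in-memory field with the "enzo"
fluid type. A hypothetical stand-alone illustration (not yt code):

    grids_in_memory = {0: {"Density": None, "Temperature": None}}

    def read_field_names(grid_id):
        # Pair every stored field name with its fluid type so it matches
        # the (field_type, field_name) tuples used elsewhere in yt.
        return [("enzo", fname) for fname in grids_in_memory[grid_id]]

    print(read_field_names(0))
    # [('enzo', 'Density'), ('enzo', 'Temperature')]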


https://bitbucket.org/yt_analysis/yt/commits/f71f0088d928/
Changeset:   f71f0088d928
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-03-27 19:20:05
Summary:     Merging
Affected #:  7 files

diff -r 4fac877ce1f740fdac58f6b7aaf33e41809d6543 -r f71f0088d9280e942289de6f43bd76d59a244771 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -279,7 +279,10 @@
                 self, dataset_type=self.dataset_type)
             # Now we do things that we need an instantiated index for
             # ...first off, we create our field_info now.
+            oldsettings = np.geterr()
+            np.seterr(all='ignore')
             self.create_field_info()
+            np.seterr(**oldsettings)
         return self._instantiated_index
     
     _index_proxy = None
@@ -361,11 +364,16 @@
         # No string lookups here, we need an actual union.
         f = self.particle_fields_by_type
         fields = set_intersection([f[s] for s in union
-                                   if s in self.particle_types_raw])
+                                   if s in self.particle_types_raw
+                                   and len(f[s]) > 0])
         for field in fields:
             units = set([])
             for s in union:
-                units.add(self.field_units.get((s, field), ""))
+                # First we check our existing fields for units
+                funits = self._get_field_info(s, field).units
+                # Then we override with field_units settings.
+                funits = self.field_units.get((s, field), funits)
+                units.add(funits)
             if len(units) == 1:
                 self.field_units[union.name, field] = list(units)[0]
         self.particle_types += (union.name,)
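
The create_field_info hunk above wraps the call in a save/ignore/restore of
numpy's floating-point error state. The same pattern in isolation, plus
numpy's np.errstate context manager, which scopes the change automatically
(illustrative sketch, not the yt code path):

    import numpy as np

    old_settings = np.geterr()           # remember the current FP-error rules
    np.seterr(all="ignore")              # silence divide/invalid/etc. warnings
    try:
        _ = np.float64(1.0) / np.zeros(3)    # would normally emit a warning
    finally:
        np.seterr(**old_settings)        # put the previous rules back

    with np.errstate(all="ignore"):      # equivalent, automatically scoped form
        _ = np.float64(1.0) / np.zeros(3)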

diff -r 4fac877ce1f740fdac58f6b7aaf33e41809d6543 -r f71f0088d9280e942289de6f43bd76d59a244771 yt/fields/field_detector.py
--- a/yt/fields/field_detector.py
+++ b/yt/fields/field_detector.py
@@ -188,6 +188,8 @@
             return rv
         elif param == "fof_groups":
             return None
+        elif param == "mu":
+            return 1.0
         else:
             return 0.0
 

diff -r 4fac877ce1f740fdac58f6b7aaf33e41809d6543 -r f71f0088d9280e942289de6f43bd76d59a244771 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -40,6 +40,7 @@
     def pyfits(self):
         if self._pyfits is None:
             import astropy.io.fits as pyfits
+            self.log
             self._pyfits = pyfits
         return self._pyfits
 
@@ -48,6 +49,7 @@
     def pywcs(self):
         if self._pywcs is None:
             import astropy.wcs as pywcs
+            self.log
             self._pywcs = pywcs
         return self._pywcs
 

diff -r 4fac877ce1f740fdac58f6b7aaf33e41809d6543 -r f71f0088d9280e942289de6f43bd76d59a244771 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -498,6 +498,13 @@
             raise RuntimeError
         new_data[new_field] = data[field]
         field_units[new_field] = field_units.pop(field)
+        known_fields = StreamFieldInfo.known_particle_fields \
+                     + StreamFieldInfo.known_other_fields
+        # We do not want to override any of the known ones, if it's not
+        # overridden here.
+        if any(f[0] == new_field[1] for f in known_fields) and \
+           field_units[new_field] == "":
+            field_units.pop(new_field)
     data = new_data
     return field_units, data
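
The new known_fields check above drops an empty unit override when yt already
knows the field, so the built-in default units win. A rough stand-alone sketch
under assumed data shapes (known field entries treated as (name, spec) pairs;
hypothetical, not yt's real field tables):

    known_other_fields = (("density", ("g/cm**3", [], None)),)
    field_units = {("gas", "density"): ""}

    for ftype, fname in list(field_units):
        # An empty string would otherwise override the known default unit.
        if any(known == fname for known, _spec in known_other_fields) and \
           field_units[(ftype, fname)] == "":
            field_units.pop((ftype, fname))

    print(field_units)   # {} -- the default "g/cm**3" stays in effect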
 

diff -r 4fac877ce1f740fdac58f6b7aaf33e41809d6543 -r f71f0088d9280e942289de6f43bd76d59a244771 yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -52,13 +52,7 @@
             raise NotImplementedError
         rv = {}
         for field in fields:
-            ftype, fname = field
-            try:
-                field_units = self.field_units[fname]
-            except KeyError:
-                field_units = self.field_units[field]
-            rv[field] = self.pf.arr(np.empty(size, dtype="float64"),
-                                    field_units)
+            rv[field] = self.pf.arr(np.empty(size, dtype="float64"))
         ng = sum(len(c.objs) for c in chunks)
         mylog.debug("Reading %s cells of %s fields in %s blocks",
                     size, [f2 for f1, f2 in fields], ng)

diff -r 4fac877ce1f740fdac58f6b7aaf33e41809d6543 -r f71f0088d9280e942289de6f43bd76d59a244771 yt/visualization/plot_container.py
--- a/yt/visualization/plot_container.py
+++ b/yt/visualization/plot_container.py
@@ -93,6 +93,10 @@
         item = self.data_source._determine_fields(item)[0]
         return dict.__getitem__(self, item)
 
+    def __contains__(self, item):
+        item = self.data_source._determine_fields(item)[0]
+        return dict.__contains__(self, item)
+
     def __init__(self, data_source, *args):
         self.data_source = data_source
         return dict.__init__(self, args)
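
The __contains__ addition mirrors __getitem__, so membership tests go through
the same field-name normalization as lookups. A self-contained sketch of why
that matters (hypothetical class, not the yt container itself):

    class FieldKeyedDict(dict):
        def __init__(self, normalize):
            super().__init__()
            self._normalize = normalize    # e.g. "density" -> ("gas", "density")

        def __getitem__(self, item):
            return dict.__getitem__(self, self._normalize(item))

        def __contains__(self, item):
            # Without this override, "density" in d would skip normalization
            # and return False even though ("gas", "density") is stored.
            return dict.__contains__(self, self._normalize(item))

    d = FieldKeyedDict(lambda f: f if isinstance(f, tuple) else ("gas", f))
    d[("gas", "density")] = "a plot"
    print("density" in d)    # True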

diff -r 4fac877ce1f740fdac58f6b7aaf33e41809d6543 -r f71f0088d9280e942289de6f43bd76d59a244771 yt/visualization/tests/test_callbacks.py
--- a/yt/visualization/tests/test_callbacks.py
+++ b/yt/visualization/tests/test_callbacks.py
@@ -163,5 +163,4 @@
         p.annotate_grids(alpha=0.7, min_pix=10, min_pix_ids=30,
             draw_ids=True, periodic=False, min_level=2,
             max_level=3, cmap="gist_stern")
-        p.save()
-
+        p.save(prefix)


https://bitbucket.org/yt_analysis/yt/commits/ac0bcdfd5e2c/
Changeset:   ac0bcdfd5e2c
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-03-28 17:30:52
Summary:     Making sure we cut out the ghost zones for in memory grids and removing some unused functions.
Affected #:  1 file

diff -r f71f0088d9280e942289de6f43bd76d59a244771 -r ac0bcdfd5e2cebb897eb680c9ac5224d401924cb yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -233,38 +233,9 @@
                       slice(ghost_zones,-ghost_zones))
         BaseIOHandler.__init__(self, pf)
 
-    def _read_data_set(self, grid, field):
-        if grid.id not in self.grids_in_memory:
-            mylog.error("Was asked for %s but I have %s", grid.id, self.grids_in_memory.keys())
-            raise KeyError
-        tr = self.grids_in_memory[grid.id][field]
-        # If it's particles, we copy.
-        if len(tr.shape) == 1: return tr.copy()
-        # New in-place unit conversion breaks if we don't copy first
-        return tr.swapaxes(0,2)[self.my_slice].copy()
-        # We don't do this, because we currently do not interpolate
-        coef1 = max((grid.Time - t1)/(grid.Time - t2), 0.0)
-        coef2 = 1.0 - coef1
-        t1 = enzo.yt_parameter_file["InitialTime"]
-        t2 = enzo.hierarchy_information["GridOldTimes"][grid.id]
-        return (coef1*self.grids_in_memory[grid.id][field] + \
-                coef2*self.old_grids_in_memory[grid.id][field])\
-                [self.my_slice]
-
-    def modify(self, field):
-        return field.swapaxes(0,2)
-
     def _read_field_names(self, grid):
         return [("enzo", field) for field in self.grids_in_memory[grid.id].keys()]
 
-    def _read_data_slice(self, grid, field, axis, coord):
-        sl = [slice(3,-3), slice(3,-3), slice(3,-3)]
-        sl[axis] = slice(coord + 3, coord + 4)
-        sl = tuple(reversed(sl))
-        tr = self.grids_in_memory[grid.id][field][sl].swapaxes(0,2)
-        # In-place unit conversion requires we return a copy
-        return tr.copy()
-
     def _read_fluid_selection(self, chunks, selector, fields, size):
         rv = {}
         # Now we have to do something unpleasant
@@ -292,12 +263,9 @@
         for chunk in chunks:
             for g in chunk.objs:
                 if g.id not in self.grids_in_memory: continue
-
-                data = np.empty(g.ActiveDimensions[::-1], dtype="float64")
-                data_view = data.swapaxes(0,2)
                 for field in fields:
                     ftype, fname = field
-                    data_view = self.grids_in_memory[g.id][fname]
+                    data_view = self.grids_in_memory[g.id][fname][self.my_slice]
                     nd = g.select(selector, data_view, rv[field], ind)
         return rv
 
@@ -333,10 +301,6 @@
                             data = data * g.dds.prod(dtype="f8")
                         yield (ptype, field), data[mask]
 
-    @property
-    def _read_exception(self):
-        return KeyError
-
 class IOHandlerPacked2D(IOHandlerPackedHDF5):
 
     _dataset_type = "enzo_packed_2d"
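
The _read_fluid_selection change above applies self.my_slice, which the
constructor builds as one slice(ghost_zones, -ghost_zones) per axis, so only
the active zone of each in-memory grid is handed to the selector. A rough
illustration (assumes 3 ghost cells per face, as Enzo uses; not yt code):

    import numpy as np

    ghost_zones = 3
    my_slice = (slice(ghost_zones, -ghost_zones),) * 3

    raw = np.arange(22**3, dtype="float64").reshape(22, 22, 22)
    active = raw[my_slice]      # strip the ghost layer from every face
    print(active.shape)         # (16, 16, 16)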


https://bitbucket.org/yt_analysis/yt/commits/c61681e948ce/
Changeset:   c61681e948ce
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-03-28 18:48:44
Summary:     Fixing indexing bug for in memory datasets.  Cheers to el presidente Matt for solving this.
Affected #:  1 file

diff -r ac0bcdfd5e2cebb897eb680c9ac5224d401924cb -r c61681e948ce9362a7dec40c8a7c3b51c7ddf9f8 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -266,7 +266,7 @@
                 for field in fields:
                     ftype, fname = field
                     data_view = self.grids_in_memory[g.id][fname][self.my_slice]
-                    nd = g.select(selector, data_view, rv[field], ind)
+                    ind += g.select(selector, data_view, rv[field], ind)
         return rv
 
     def _read_particle_coords(self, chunks, ptf):
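
The one-line fix above is about bookkeeping: g.select() writes a grid's
selected cells into the preallocated rv[field] buffer starting at offset ind
and returns how many it wrote, so the offset has to accumulate across grids.
A hypothetical stand-in showing the pattern (select_into is invented for
illustration, not yt's selector API):

    import numpy as np

    def select_into(grid_values, out, offset):
        # Copy this grid's values into out[offset:] and report the count.
        n = grid_values.size
        out[offset:offset + n] = grid_values.ravel()
        return n

    grids = [np.full((2, 2), 1.0), np.full((3, 3), 2.0)]
    out = np.empty(sum(g.size for g in grids), dtype="float64")

    ind = 0
    for g in grids:
        # "ind +=" is the fix; discarding the return value would make
        # every grid overwrite the start of the buffer.
        ind += select_into(g, out, ind)

    print(ind, out[:5])    # 13 [1. 1. 1. 1. 2.]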


https://bitbucket.org/yt_analysis/yt/commits/8bcd7646b8b9/
Changeset:   8bcd7646b8b9
Branch:      yt-3.0
User:        samskillman
Date:        2014-03-26 21:54:34
Summary:     Merging from main.
Affected #:  4 files

diff -r 558114958e969919490ae83cf51f4a1ea1294ae3 -r 8bcd7646b8b900e50c529865f35a4a0da882192d yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -279,7 +279,10 @@
                 self, dataset_type=self.dataset_type)
             # Now we do things that we need an instantiated index for
             # ...first off, we create our field_info now.
+            oldsettings = np.geterr()
+            np.seterr(all='ignore')
             self.create_field_info()
+            np.seterr(**oldsettings)
         return self._instantiated_index
     
     _index_proxy = None

diff -r 558114958e969919490ae83cf51f4a1ea1294ae3 -r 8bcd7646b8b900e50c529865f35a4a0da882192d yt/fields/field_detector.py
--- a/yt/fields/field_detector.py
+++ b/yt/fields/field_detector.py
@@ -188,6 +188,8 @@
             return rv
         elif param == "fof_groups":
             return None
+        elif param == "mu":
+            return 1.0
         else:
             return 0.0
 

diff -r 558114958e969919490ae83cf51f4a1ea1294ae3 -r 8bcd7646b8b900e50c529865f35a4a0da882192d yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -40,6 +40,7 @@
     def pyfits(self):
         if self._pyfits is None:
             import astropy.io.fits as pyfits
+            self.log
             self._pyfits = pyfits
         return self._pyfits
 
@@ -48,6 +49,7 @@
     def pywcs(self):
         if self._pywcs is None:
             import astropy.wcs as pywcs
+            self.log
             self._pywcs = pywcs
         return self._pywcs
 

diff -r 558114958e969919490ae83cf51f4a1ea1294ae3 -r 8bcd7646b8b900e50c529865f35a4a0da882192d yt/visualization/plot_container.py
--- a/yt/visualization/plot_container.py
+++ b/yt/visualization/plot_container.py
@@ -93,6 +93,10 @@
         item = self.data_source._determine_fields(item)[0]
         return dict.__getitem__(self, item)
 
+    def __contains__(self, item):
+        item = self.data_source._determine_fields(item)[0]
+        return dict.__contains__(self, item)
+
     def __init__(self, data_source, *args):
         self.data_source = data_source
         return dict.__init__(self, args)


https://bitbucket.org/yt_analysis/yt/commits/2fadb65007ac/
Changeset:   2fadb65007ac
Branch:      yt-3.0
User:        samskillman
Date:        2014-03-27 19:40:49
Summary:     Merging in Britton's fix
Affected #:  2 files

diff -r 8bcd7646b8b900e50c529865f35a4a0da882192d -r 2fadb65007ac5fed37b80dc9d4100b3e0f6386a6 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -829,16 +829,12 @@
         else:
             if "LengthUnits" in self.parameters:
                 length_unit = self.parameters["LengthUnits"]
+                mass_unit = self.parameters["DensityUnits"] * length_unit**3
                 time_unit = self.parameters["TimeUnits"]
             else:
                 mylog.warning("Setting 1.0 in code units to be 1.0 cm")
                 mylog.warning("Setting 1.0 in code units to be 1.0 s")
                 length_unit = mass_unit = time_unit = 1.0
-            if "MassUnits" in self.parameters:
-                mass_unit = self.parameters["MassUnits"]
-            else:
-                mylog.warning("Setting 1.0 in code units to be 1.0 g")
-                mass_unit = 1.0
 
             self.length_unit = self.quan(length_unit, "cm")
             self.mass_unit = self.quan(mass_unit, "g")
@@ -902,6 +898,7 @@
         return obj
 
     def __init__(self, parameter_override=None, conversion_override=None):
+        self.fluid_types += ("enzo",)
         if parameter_override is None: parameter_override = {}
         self._parameter_override = parameter_override
         if conversion_override is None: conversion_override = {}

diff -r 8bcd7646b8b900e50c529865f35a4a0da882192d -r 2fadb65007ac5fed37b80dc9d4100b3e0f6386a6 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -255,7 +255,7 @@
         return field.swapaxes(0,2)
 
     def _read_field_names(self, grid):
-        return self.grids_in_memory[grid.id].keys()
+        return [("enzo", field) for field in self.grids_in_memory[grid.id].keys()]
 
     def _read_data_slice(self, grid, field, axis, coord):
         sl = [slice(3,-3), slice(3,-3), slice(3,-3)]


https://bitbucket.org/yt_analysis/yt/commits/a1526235d0e7/
Changeset:   a1526235d0e7
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-03-28 18:56:48
Summary:     Merging.
Affected #:  5 files

diff -r 2fadb65007ac5fed37b80dc9d4100b3e0f6386a6 -r a1526235d0e7a63aa87406d1ba5a1e58edccb876 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -364,11 +364,16 @@
         # No string lookups here, we need an actual union.
         f = self.particle_fields_by_type
         fields = set_intersection([f[s] for s in union
-                                   if s in self.particle_types_raw])
+                                   if s in self.particle_types_raw
+                                   and len(f[s]) > 0])
         for field in fields:
             units = set([])
             for s in union:
-                units.add(self.field_units.get((s, field), ""))
+                # First we check our existing fields for units
+                funits = self._get_field_info(s, field).units
+                # Then we override with field_units settings.
+                funits = self.field_units.get((s, field), funits)
+                units.add(funits)
             if len(units) == 1:
                 self.field_units[union.name, field] = list(units)[0]
         self.particle_types += (union.name,)

diff -r 2fadb65007ac5fed37b80dc9d4100b3e0f6386a6 -r a1526235d0e7a63aa87406d1ba5a1e58edccb876 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -233,38 +233,9 @@
                       slice(ghost_zones,-ghost_zones))
         BaseIOHandler.__init__(self, pf)
 
-    def _read_data_set(self, grid, field):
-        if grid.id not in self.grids_in_memory:
-            mylog.error("Was asked for %s but I have %s", grid.id, self.grids_in_memory.keys())
-            raise KeyError
-        tr = self.grids_in_memory[grid.id][field]
-        # If it's particles, we copy.
-        if len(tr.shape) == 1: return tr.copy()
-        # New in-place unit conversion breaks if we don't copy first
-        return tr.swapaxes(0,2)[self.my_slice].copy()
-        # We don't do this, because we currently do not interpolate
-        coef1 = max((grid.Time - t1)/(grid.Time - t2), 0.0)
-        coef2 = 1.0 - coef1
-        t1 = enzo.yt_parameter_file["InitialTime"]
-        t2 = enzo.hierarchy_information["GridOldTimes"][grid.id]
-        return (coef1*self.grids_in_memory[grid.id][field] + \
-                coef2*self.old_grids_in_memory[grid.id][field])\
-                [self.my_slice]
-
-    def modify(self, field):
-        return field.swapaxes(0,2)
-
     def _read_field_names(self, grid):
         return [("enzo", field) for field in self.grids_in_memory[grid.id].keys()]
 
-    def _read_data_slice(self, grid, field, axis, coord):
-        sl = [slice(3,-3), slice(3,-3), slice(3,-3)]
-        sl[axis] = slice(coord + 3, coord + 4)
-        sl = tuple(reversed(sl))
-        tr = self.grids_in_memory[grid.id][field][sl].swapaxes(0,2)
-        # In-place unit conversion requires we return a copy
-        return tr.copy()
-
     def _read_fluid_selection(self, chunks, selector, fields, size):
         rv = {}
         # Now we have to do something unpleasant
@@ -292,13 +263,10 @@
         for chunk in chunks:
             for g in chunk.objs:
                 if g.id not in self.grids_in_memory: continue
-
-                data = np.empty(g.ActiveDimensions[::-1], dtype="float64")
-                data_view = data.swapaxes(0,2)
                 for field in fields:
                     ftype, fname = field
-                    data_view = self.grids_in_memory[g.id][fname]
-                    nd = g.select(selector, data_view, rv[field], ind)
+                    data_view = self.grids_in_memory[g.id][fname][self.my_slice]
+                    ind += g.select(selector, data_view, rv[field], ind)
         return rv
 
     def _read_particle_coords(self, chunks, ptf):
@@ -333,10 +301,6 @@
                             data = data * g.dds.prod(dtype="f8")
                         yield (ptype, field), data[mask]
 
-    @property
-    def _read_exception(self):
-        return KeyError
-
 class IOHandlerPacked2D(IOHandlerPackedHDF5):
 
     _dataset_type = "enzo_packed_2d"

diff -r 2fadb65007ac5fed37b80dc9d4100b3e0f6386a6 -r a1526235d0e7a63aa87406d1ba5a1e58edccb876 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -498,6 +498,13 @@
             raise RuntimeError
         new_data[new_field] = data[field]
         field_units[new_field] = field_units.pop(field)
+        known_fields = StreamFieldInfo.known_particle_fields \
+                     + StreamFieldInfo.known_other_fields
+        # We do not want to override any of the known ones, if it's not
+        # overridden here.
+        if any(f[0] == new_field[1] for f in known_fields) and \
+           field_units[new_field] == "":
+            field_units.pop(new_field)
     data = new_data
     return field_units, data
 

diff -r 2fadb65007ac5fed37b80dc9d4100b3e0f6386a6 -r a1526235d0e7a63aa87406d1ba5a1e58edccb876 yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -52,13 +52,7 @@
             raise NotImplementedError
         rv = {}
         for field in fields:
-            ftype, fname = field
-            try:
-                field_units = self.field_units[fname]
-            except KeyError:
-                field_units = self.field_units[field]
-            rv[field] = self.pf.arr(np.empty(size, dtype="float64"),
-                                    field_units)
+            rv[field] = self.pf.arr(np.empty(size, dtype="float64"))
         ng = sum(len(c.objs) for c in chunks)
         mylog.debug("Reading %s cells of %s fields in %s blocks",
                     size, [f2 for f1, f2 in fields], ng)

diff -r 2fadb65007ac5fed37b80dc9d4100b3e0f6386a6 -r a1526235d0e7a63aa87406d1ba5a1e58edccb876 yt/visualization/tests/test_callbacks.py
--- a/yt/visualization/tests/test_callbacks.py
+++ b/yt/visualization/tests/test_callbacks.py
@@ -163,5 +163,4 @@
         p.annotate_grids(alpha=0.7, min_pix=10, min_pix_ids=30,
             draw_ids=True, periodic=False, min_level=2,
             max_level=3, cmap="gist_stern")
-        p.save()
-
+        p.save(prefix)


https://bitbucket.org/yt_analysis/yt/commits/19470c49dd8c/
Changeset:   19470c49dd8c
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-03-28 20:19:56
Summary:     Merged in brittonsmith/yt/yt-3.0 (pull request #777)

Fixing in memory Enzo datasets.
Affected #:  2 files

diff -r ff4b94238c78fb1f97e4d3cb535274b5fed23049 -r 19470c49dd8c1c5c66ee7a8bd638940761b5d3e9 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -532,7 +532,7 @@
         self.dataset_type = dataset_type
         self.float_type = 'float64'
         self.parameter_file = weakref.proxy(pf) # for _obtain_enzo
-        self.float_type = self.enzo.index_information["GridLeftEdge"].dtype
+        self.float_type = self.enzo.hierarchy_information["GridLeftEdge"].dtype
         self.directory = os.getcwd()
         GridIndex.__init__(self, pf, dataset_type)
 
@@ -540,12 +540,12 @@
         pass
 
     def _count_grids(self):
-        self.num_grids = self.enzo.index_information["GridDimensions"].shape[0]
+        self.num_grids = self.enzo.hierarchy_information["GridDimensions"].shape[0]
 
     def _parse_index(self):
         self._copy_index_structure()
         mylog.debug("Copying reverse tree")
-        reverse_tree = self.enzo.index_information["GridParentIDs"].ravel().tolist()
+        reverse_tree = self.enzo.hierarchy_information["GridParentIDs"].ravel().tolist()
         # Initial setup:
         mylog.debug("Reconstructing parent-child relationships")
         grids = []
@@ -574,14 +574,14 @@
 
     def _copy_index_structure(self):
         # Dimensions are important!
-        self.grid_dimensions[:] = self.enzo.index_information["GridEndIndices"][:]
-        self.grid_dimensions -= self.enzo.index_information["GridStartIndices"][:]
+        self.grid_dimensions[:] = self.enzo.hierarchy_information["GridEndIndices"][:]
+        self.grid_dimensions -= self.enzo.hierarchy_information["GridStartIndices"][:]
         self.grid_dimensions += 1
-        self.grid_left_edge[:] = self.enzo.index_information["GridLeftEdge"][:]
-        self.grid_right_edge[:] = self.enzo.index_information["GridRightEdge"][:]
-        self.grid_levels[:] = self.enzo.index_information["GridLevels"][:]
-        self.grid_procs = self.enzo.index_information["GridProcs"].copy()
-        self.grid_particle_count[:] = self.enzo.index_information["GridNumberOfParticles"][:]
+        self.grid_left_edge[:] = self.enzo.hierarchy_information["GridLeftEdge"][:]
+        self.grid_right_edge[:] = self.enzo.hierarchy_information["GridRightEdge"][:]
+        self.grid_levels[:] = self.enzo.hierarchy_information["GridLevels"][:]
+        self.grid_procs = self.enzo.hierarchy_information["GridProcs"].copy()
+        self.grid_particle_count[:] = self.enzo.hierarchy_information["GridNumberOfParticles"][:]
 
     def save_data(self, *args, **kwargs):
         pass
@@ -829,12 +829,11 @@
         else:
             if "LengthUnits" in self.parameters:
                 length_unit = self.parameters["LengthUnits"]
-                mass_unit = self.parameters["MassUnits"]
+                mass_unit = self.parameters["DensityUnits"] * length_unit**3
                 time_unit = self.parameters["TimeUnits"]
             else:
                 mylog.warning("Setting 1.0 in code units to be 1.0 cm")
                 mylog.warning("Setting 1.0 in code units to be 1.0 s")
-                mylog.warning("Setting 1.0 in code units to be 1.0 g")
                 length_unit = mass_unit = time_unit = 1.0
 
             self.length_unit = self.quan(length_unit, "cm")
@@ -899,6 +898,7 @@
         return obj
 
     def __init__(self, parameter_override=None, conversion_override=None):
+        self.fluid_types += ("enzo",)
         if parameter_override is None: parameter_override = {}
         self._parameter_override = parameter_override
         if conversion_override is None: conversion_override = {}

diff -r ff4b94238c78fb1f97e4d3cb535274b5fed23049 -r 19470c49dd8c1c5c66ee7a8bd638940761b5d3e9 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -233,37 +233,8 @@
                       slice(ghost_zones,-ghost_zones))
         BaseIOHandler.__init__(self, pf)
 
-    def _read_data_set(self, grid, field):
-        if grid.id not in self.grids_in_memory:
-            mylog.error("Was asked for %s but I have %s", grid.id, self.grids_in_memory.keys())
-            raise KeyError
-        tr = self.grids_in_memory[grid.id][field]
-        # If it's particles, we copy.
-        if len(tr.shape) == 1: return tr.copy()
-        # New in-place unit conversion breaks if we don't copy first
-        return tr.swapaxes(0,2)[self.my_slice].copy()
-        # We don't do this, because we currently do not interpolate
-        coef1 = max((grid.Time - t1)/(grid.Time - t2), 0.0)
-        coef2 = 1.0 - coef1
-        t1 = enzo.yt_parameter_file["InitialTime"]
-        t2 = enzo.index_information["GridOldTimes"][grid.id]
-        return (coef1*self.grids_in_memory[grid.id][field] + \
-                coef2*self.old_grids_in_memory[grid.id][field])\
-                [self.my_slice]
-
-    def modify(self, field):
-        return field.swapaxes(0,2)
-
     def _read_field_names(self, grid):
-        return self.grids_in_memory[grid.id].keys()
-
-    def _read_data_slice(self, grid, field, axis, coord):
-        sl = [slice(3,-3), slice(3,-3), slice(3,-3)]
-        sl[axis] = slice(coord + 3, coord + 4)
-        sl = tuple(reversed(sl))
-        tr = self.grids_in_memory[grid.id][field][sl].swapaxes(0,2)
-        # In-place unit conversion requires we return a copy
-        return tr.copy()
+        return [("enzo", field) for field in self.grids_in_memory[grid.id].keys()]
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
         rv = {}
@@ -292,13 +263,10 @@
         for chunk in chunks:
             for g in chunk.objs:
                 if g.id not in self.grids_in_memory: continue
-
-                data = np.empty(g.ActiveDimensions[::-1], dtype="float64")
-                data_view = data.swapaxes(0,2)
                 for field in fields:
                     ftype, fname = field
-                    data_view = self.grids_in_memory[g.id][fname]
-                    nd = g.select(selector, data_view, rv[field], ind)
+                    data_view = self.grids_in_memory[g.id][fname][self.my_slice]
+                    ind += g.select(selector, data_view, rv[field], ind)
         return rv
 
     def _read_particle_coords(self, chunks, ptf):
@@ -333,10 +301,6 @@
                             data = data * g.dds.prod(dtype="f8")
                         yield (ptype, field), data[mask]
 
-    @property
-    def _read_exception(self):
-        return KeyError
-
 class IOHandlerPacked2D(IOHandlerPackedHDF5):
 
     _dataset_type = "enzo_packed_2d"

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


