[yt-svn] commit/yt: MatthewTurk: Merged in atmyers/yt-fixes/yt-3.0 (pull request #858)

commits-noreply at bitbucket.org
Sun May 4 07:57:02 PDT 2014


1 new commit in yt:

https://bitbucket.org/yt_analysis/yt/commits/7de110e64df8/
Changeset:   7de110e64df8
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-04 16:56:54
Summary:     Merged in atmyers/yt-fixes/yt-3.0 (pull request #858)

Chombo frontend Refactor
Affected #:  5 files
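
The changes below split the ORION2-specific machinery out of the generic
Chombo classes, add 1D/2D support, and route frontend detection through the
_is_valid methods, which sniff the run directory for pluto.ini / orion2.ini
and the HDF5 file for ORION2-only attributes. A minimal sketch of that
detection logic, reduced from the _is_valid methods in the diff (the helper
name is illustrative, not part of this commit):

    import os
    import h5py

    def sniff_frontend(path):
        # Reduced from ChomboDataset._is_valid / Orion2Dataset._is_valid.
        dir_name = os.path.dirname(os.path.abspath(path))
        if os.path.isfile(os.path.join(dir_name, "orion2.ini")):
            return "orion_chombo_native"
        if os.path.isfile(os.path.join(dir_name, "pluto.ini")):
            return None                     # not plain Chombo or ORION2
        with h5py.File(path, "r") as fileh:
            if "Chombo_global" not in fileh["/"]:
                return None
            # ORION2 outputs should always carry this attribute:
            if "CeilVA_mass" in fileh.attrs:
                return "orion_chombo_native"
        return "chombo_hdf5"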

diff -r 43b6fc9b1094fa717396ea93c51fc9d9764044b9 -r 7de110e64df8ee328ab22f5977a907d994b6087c yt/frontends/chombo/api.py
--- a/yt/frontends/chombo/api.py
+++ b/yt/frontends/chombo/api.py
@@ -16,11 +16,12 @@
 from .data_structures import \
       ChomboGrid, \
       ChomboHierarchy, \
-      ChomboDataset
+      ChomboDataset, \
+      Orion2Hierarchy, \
+      Orion2Dataset
 
 from .fields import \
       ChomboFieldInfo
-add_chombo_field = ChomboFieldInfo.add_field
 
 from .io import \
       IOHandlerChomboHDF5

diff -r 43b6fc9b1094fa717396ea93c51fc9d9764044b9 -r 7de110e64df8ee328ab22f5977a907d994b6087c yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -27,11 +27,6 @@
 from stat import \
      ST_CTIME
 
-from .definitions import \
-     chombo2enzoDict, \
-     yt2chomboFieldsDict, \
-     parameterDict \
-
 from yt.funcs import *
 from yt.data_objects.grid_patch import \
      AMRGridPatch
@@ -48,7 +43,7 @@
 from yt.utilities.io_handler import \
     io_registry
 
-from .fields import ChomboFieldInfo
+from .fields import ChomboFieldInfo, Orion2FieldInfo
 
 class ChomboGrid(AMRGridPatch):
     _id_offset = 0
@@ -56,8 +51,8 @@
     def __init__(self, id, index, level, start, stop):
         AMRGridPatch.__init__(self, id, filename = index.index_filename,
                               index = index)
-        self.Parent = []
-        self.Children = []
+        self._parent_id = []
+        self._children_ids = []
         self.Level = level
         self.ActiveDimensions = stop - start + 1
 
@@ -69,7 +64,7 @@
         """
         if self.start_index is not None:
             return self.start_index
-        if self.Parent == []:
+        if self.Parent is None:
             iLE = self.LeftEdge - self.pf.domain_left_edge
             start_index = iLE / self.dds
             return np.rint(start_index).astype('int64').ravel()
@@ -83,14 +78,33 @@
         # has already been read in and stored in index
         self.dds = self.pf.arr(self.index.dds_list[self.Level], "code_length")
 
+    @property
+    def Parent(self):
+        if len(self._parent_id) == 0:
+            return None
+        return [self.index.grids[pid - self._id_offset]
+                for pid in self._parent_id]
+
+    @property
+    def Children(self):
+        return [self.index.grids[cid - self._id_offset]
+                for cid in self._children_ids]
+
 class ChomboHierarchy(GridIndex):
 
     grid = ChomboGrid
+    _data_file = None
 
     def __init__(self,pf,dataset_type='chombo_hdf5'):
         self.domain_left_edge = pf.domain_left_edge
         self.domain_right_edge = pf.domain_right_edge
         self.dataset_type = dataset_type
+
+        if pf.dimensionality == 1:
+            self.dataset_type = "chombo1d_hdf5"
+        if pf.dimensionality == 2:
+            self.dataset_type = "chombo2d_hdf5"        
+
         self.field_indexes = {}
         self.parameter_file = weakref.proxy(pf)
         # for now, the index file is the parameter file!
@@ -99,12 +113,267 @@
         self.directory = pf.fullpath
         self._handle = pf._handle
 
-        self.float_type = self._handle['/level_0']['data:datatype=0'].dtype.name
-        self._levels = self._handle.keys()[1:]
+        self.float_type = self._handle['Chombo_global'].attrs['testReal'].dtype.name
+        self._levels = [key for key in self._handle.keys() if key.startswith('level')]
         GridIndex.__init__(self,pf,dataset_type)
+
         self._read_particles()
 
     def _read_particles(self):
+
+        # only do anything if the dataset contains particles
+        if not any([f[1].startswith('particle_') for f in self.field_list]):
+            return
+        
+        self.num_particles = 0
+        particles_per_grid = []
+        for key, val in self._handle.items():
+            if key.startswith('level'):
+                level_particles = val['particles:offsets'][:]
+                self.num_particles += level_particles.sum()
+                particles_per_grid = np.concatenate((particles_per_grid, level_particles))
+
+        for i, grid in enumerate(self.grids):
+            self.grids[i].NumberOfParticles = particles_per_grid[i]
+            self.grid_particle_count[i] = particles_per_grid[i]
+
+        assert(self.num_particles == self.grid_particle_count.sum())
+
+    # Chombo datasets, by themselves, have no "known" fields. However, 
+    # we will look for "fluid" fields by finding the string "component" in
+    # the output file, and "particle" fields by finding the string "particle".
+    def _detect_output_fields(self):
+
+        # look for fluid fields
+        output_fields = []
+        for key, val in self._handle['/'].attrs.items():
+            if key.startswith("component"):
+                output_fields.append(val)
+        self.field_list = [("chombo", c) for c in output_fields]
+
+        # look for particle fields
+        particle_fields = []
+        for key, val in self._handle['/'].attrs.items():
+            if key.startswith("particle"):
+                particle_fields.append(val)
+        self.field_list.extend([("io", c) for c in particle_fields])        
+
+    def _count_grids(self):
+        self.num_grids = 0
+        for lev in self._levels:
+            self.num_grids += self._handle[lev]['Processors'].len()
+
+    def _parse_index(self):
+        f = self._handle # shortcut
+        self.max_level = f.attrs['max_level']
+
+        grids = []
+        self.dds_list = []
+        i = 0
+        D = self.parameter_file.dimensionality
+        for lev_index, lev in enumerate(self._levels):
+            level_number = int(re.match('level_(\d+)',lev).groups()[0])
+            try:
+                boxes = f[lev]['boxes'].value
+            except KeyError:
+                boxes = f[lev]['particles:boxes'].value
+            dx = f[lev].attrs['dx']
+            self.dds_list.append(dx * np.ones(3))
+
+            if D == 1:
+                self.dds_list[lev_index][1] = 1.0
+                self.dds_list[lev_index][2] = 1.0
+
+            if D == 2:
+                self.dds_list[lev_index][2] = 1.0
+
+            for level_id, box in enumerate(boxes):
+                si = np.array([box['lo_%s' % ax] for ax in 'ijk'[:D]])
+                ei = np.array([box['hi_%s' % ax] for ax in 'ijk'[:D]])
+                
+                if D == 1:
+                    si = np.concatenate((si, [0.0, 0.0]))
+                    ei = np.concatenate((ei, [0.0, 0.0]))
+
+                if D == 2:
+                    si = np.concatenate((si, [0.0]))
+                    ei = np.concatenate((ei, [0.0]))
+
+                pg = self.grid(len(grids),self,level=level_number,
+                               start = si, stop = ei)
+                grids.append(pg)
+                grids[-1]._level_id = level_id
+                self.grid_levels[i] = level_number
+                self.grid_left_edge[i] = self.dds_list[lev_index]*si.astype(self.float_type)
+                self.grid_right_edge[i] = self.dds_list[lev_index]*(ei.astype(self.float_type)+1)
+                self.grid_particle_count[i] = 0
+                self.grid_dimensions[i] = ei - si + 1
+                i += 1
+        self.grids = np.empty(len(grids), dtype='object')
+        for gi, g in enumerate(grids): self.grids[gi] = g
+
+    def _populate_grid_objects(self):
+        self._reconstruct_parent_child()
+        for g in self.grids:
+            g._prepare_grid()
+            g._setup_dx()
+
+    def _setup_derived_fields(self):
+        self.derived_field_list = []
+
+    def _reconstruct_parent_child(self):
+        mask = np.empty(len(self.grids), dtype='int32')
+        mylog.debug("First pass; identifying child grids")
+        for i, grid in enumerate(self.grids):
+            get_box_grids_level(self.grid_left_edge[i,:],
+                                self.grid_right_edge[i,:],
+                                self.grid_levels[i] + 1,
+                                self.grid_left_edge, self.grid_right_edge,
+                                self.grid_levels, mask)
+            ids = np.where(mask.astype("bool")) # where is a tuple
+            grid._children_ids = ids[0] + grid._id_offset 
+        mylog.debug("Second pass; identifying parents")
+        for i, grid in enumerate(self.grids): # Second pass
+            for child in grid.Children:
+                child._parent_id.append(i + grid._id_offset)
+
+class ChomboDataset(Dataset):
+    _index_class = ChomboHierarchy
+    _field_info_class = ChomboFieldInfo
+
+    def __init__(self, filename, dataset_type='chombo_hdf5',
+                 storage_filename = None, ini_filename = None):
+        self.fluid_types += ("chombo",)
+        self._handle = h5py.File(filename, 'r')
+
+        # look up the dimensionality of the dataset
+        D = self._handle['Chombo_global/'].attrs['SpaceDim']
+        if D == 1:
+            self.dataset_type = 'chombo1d_hdf5'
+        if D == 2:
+            self.dataset_type = 'chombo2d_hdf5'
+        if D == 3:
+            self.dataset_type = 'chombo_hdf5'
+
+        # some datasets will not be time-dependent, make
+        # sure we handle that here.
+        try:
+            self.current_time = self._handle.attrs['time']
+        except KeyError:
+            self.current_time = 0.0
+
+        self.geometry = "cartesian"
+        self.ini_filename = ini_filename
+        self.fullplotdir = os.path.abspath(filename)
+        Dataset.__init__(self,filename, self.dataset_type)
+        self.storage_filename = storage_filename
+        self.cosmological_simulation = False
+
+        # These are parameters that I very much wish to get rid of.
+        self.parameters["HydroMethod"] = 'chombo'
+        self.parameters["DualEnergyFormalism"] = 0 
+        self.parameters["EOSType"] = -1 # default
+
+    def __del__(self):
+        self._handle.close()
+
+    def _set_code_unit_attributes(self):
+        self.length_unit = YTQuantity(1.0, "cm")
+        self.mass_unit = YTQuantity(1.0, "g")
+        self.time_unit = YTQuantity(1.0, "s")
+        self.velocity_unit = YTQuantity(1.0, "cm/s")
+
+    def _localize(self, f, default):
+        if f is None:
+            return os.path.join(self.directory, default)
+        return f
+
+    def _parse_parameter_file(self):
+        
+        self.unique_identifier = \
+                               int(os.stat(self.parameter_filename)[ST_CTIME])
+        self.dimensionality = self._handle['Chombo_global/'].attrs['SpaceDim']
+        self.domain_left_edge = self._calc_left_edge()
+        self.domain_right_edge = self._calc_right_edge()
+        self.domain_dimensions = self._calc_domain_dimensions()
+
+        # if a lower-dimensional dataset, set up pseudo-3D stuff here.
+        if self.dimensionality == 1:
+            self.domain_left_edge = np.concatenate((self.domain_left_edge, [0.0, 0.0]))
+            self.domain_right_edge = np.concatenate((self.domain_right_edge, [1.0, 1.0]))
+            self.domain_dimensions = np.concatenate((self.domain_dimensions, [1, 1]))
+
+        if self.dimensionality == 2:
+            self.domain_left_edge = np.concatenate((self.domain_left_edge, [0.0]))
+            self.domain_right_edge = np.concatenate((self.domain_right_edge, [1.0]))
+            self.domain_dimensions = np.concatenate((self.domain_dimensions, [1]))
+        
+        self.refine_by = self._handle['/level_0'].attrs['ref_ratio']
+        self.periodicity = (True, True, True)
+
+    def _calc_left_edge(self):
+        fileh = self._handle
+        dx0 = fileh['/level_0'].attrs['dx']
+        D = self.dimensionality
+        LE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:D])
+        return LE
+
+    def _calc_right_edge(self):
+        fileh = h5py.File(self.parameter_filename,'r')
+        dx0 = fileh['/level_0'].attrs['dx']
+        D = self.dimensionality
+        RE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[D:] + 1)
+        fileh.close()
+        return RE
+
+    def _calc_domain_dimensions(self):
+        fileh = self._handle
+        D = self.dimensionality
+        L_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:D])
+        R_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[D:] + 1)
+        return R_index - L_index
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+
+        pluto_ini_file_exists  = False
+        orion2_ini_file_exists = False
+
+        if type(args[0]) == type(""):
+            dir_name = os.path.dirname(os.path.abspath(args[0]))
+            pluto_ini_filename = os.path.join(dir_name, "pluto.ini")
+            orion2_ini_filename = os.path.join(dir_name, "orion2.ini")
+            pluto_ini_file_exists = os.path.isfile(pluto_ini_filename)
+            orion2_ini_file_exists = os.path.isfile(orion2_ini_filename)
+
+        if not (pluto_ini_file_exists and orion2_ini_file_exists):
+            try:
+                fileh = h5py.File(args[0],'r')
+                valid = "Chombo_global" in fileh["/"]
+                # ORION2 simulations should always have this:
+                valid = valid and not ('CeilVA_mass' in fileh.attrs.keys())
+                fileh.close()
+                return valid
+            except:
+                pass
+        return False
+
+    @parallel_root_only
+    def print_key_parameters(self):
+        for a in ["current_time", "domain_dimensions", "domain_left_edge",
+                  "domain_right_edge"]:
+            if not hasattr(self, a):
+                mylog.error("Missing %s in parameter file definition!", a)
+                continue
+            v = getattr(self, a)
+            mylog.info("Parameters: %-25s = %s", a, v)
+
+class Orion2Hierarchy(ChomboHierarchy):
+
+    def __init__(self, pf, dataset_type="orion_chombo_native"):
+        ChomboHierarchy.__init__(self, pf, dataset_type)
+
+    def _read_particles(self):
         self.particle_filename = self.index_filename[:-4] + 'sink'
         if not os.path.exists(self.particle_filename): return
         with open(self.particle_filename, 'r') as f:
@@ -132,104 +401,16 @@
                     self.grid_particle_count[ind] += 1
                     self.grids[ind].NumberOfParticles += 1
 
-    def _detect_output_fields(self):
-        ncomp = int(self._handle['/'].attrs['num_components'])
-        self.field_list = [("chombo", c[1]) for c in self._handle['/'].attrs.items()[-ncomp:]]
-          
-    def _count_grids(self):
-        self.num_grids = 0
-        for lev in self._levels:
-            self.num_grids += self._handle[lev]['Processors'].len()
+class Orion2Dataset(ChomboDataset):
 
-    def _parse_index(self):
-        f = self._handle # shortcut
+    _index_class = Orion2Hierarchy
+    _field_info_class = Orion2FieldInfo
 
-        # this relies on the first Group in the H5 file being
-        # 'Chombo_global'
-        levels = f.keys()[1:]
-        grids = []
-        self.dds_list = []
-        i = 0
-        for lev in levels:
-            level_number = int(re.match('level_(\d+)',lev).groups()[0])
-            boxes = f[lev]['boxes'].value
-            dx = f[lev].attrs['dx']
-            self.dds_list.append(dx * np.ones(3))
-            for level_id, box in enumerate(boxes):
-                si = np.array([box['lo_%s' % ax] for ax in 'ijk'])
-                ei = np.array([box['hi_%s' % ax] for ax in 'ijk'])
-                pg = self.grid(len(grids),self,level=level_number,
-                               start = si, stop = ei)
-                grids.append(pg)
-                grids[-1]._level_id = level_id
-                self.grid_left_edge[i] = dx*si.astype(self.float_type)
-                self.grid_right_edge[i] = dx*(ei.astype(self.float_type)+1)
-                self.grid_particle_count[i] = 0
-                self.grid_dimensions[i] = ei - si + 1
-                i += 1
-        self.grids = np.empty(len(grids), dtype='object')
-        for gi, g in enumerate(grids): self.grids[gi] = g
-#        self.grids = np.array(self.grids, dtype='object')
+    def __init__(self, filename, dataset_type='orion_chombo_native',
+                 storage_filename = None, ini_filename = None):
 
-    def _populate_grid_objects(self):
-        self._reconstruct_parent_child()
-        for g in self.grids:
-            g._prepare_grid()
-            g._setup_dx()
-        self.max_level = self.grid_levels.max()
-
-    def _setup_derived_fields(self):
-        self.derived_field_list = []
-
-    def _reconstruct_parent_child(self):
-        mask = np.empty(len(self.grids), dtype='int32')
-        mylog.debug("First pass; identifying child grids")
-        for i, grid in enumerate(self.grids):
-            get_box_grids_level(self.grid_left_edge[i,:],
-                                self.grid_right_edge[i,:],
-                                self.grid_levels[i] + 1,
-                                self.grid_left_edge, self.grid_right_edge,
-                                self.grid_levels, mask)
-            ids = np.where(mask.astype("bool")) # where is a tuple
-            grid._children_ids = ids[0] + grid._id_offset 
-        mylog.debug("Second pass; identifying parents")
-        for i, grid in enumerate(self.grids): # Second pass
-            for child in grid.Children:
-                child._parent_id.append(i + grid._id_offset)
-
-class ChomboDataset(Dataset):
-    _index_class = ChomboHierarchy
-    _field_info_class = ChomboFieldInfo
-
-    def __init__(self, filename, dataset_type='chombo_hdf5',
-                 storage_filename = None, ini_filename = None):
-        self.fluid_types += ("chombo",)
-        self._handle = h5py.File(filename,'r')
-        self.current_time = self._handle.attrs['time']
-        self.ini_filename = ini_filename
-        self.fullplotdir = os.path.abspath(filename)
-        Dataset.__init__(self,filename,dataset_type)
-        self.storage_filename = storage_filename
-        self.cosmological_simulation = False
-
-        # These are parameters that I very much wish to get rid of.
-        self.parameters["HydroMethod"] = 'chombo' # always PPM DE
-        self.parameters["DualEnergyFormalism"] = 0 
-        self.parameters["EOSType"] = -1 # default
-
-    def __del__(self):
-        self._handle.close()
-
-    def _set_code_unit_attributes(self):
-        self.length_unit = self.quan(1.0, "cm")
-        self.mass_unit = self.quan(1.0, "g")
-        self.time_unit = self.quan(1.0, "s")
-        self.velocity_unit = self.quan(1.0, "cm/s")
-
-    def _localize(self, f, default):
-        if f is None:
-            return os.path.join(self.directory, default)
-        return f
+        ChomboDataset.__init__(self, filename, dataset_type, 
+                    storage_filename, ini_filename)
 
     def _parse_parameter_file(self):
         """
@@ -237,14 +418,19 @@
         exists in the plot file directory. If one does, attempt to parse it.
         Otherwise grab the dimensions from the hdf5 file.
         """
-        
-        if os.path.isfile('orion2.ini'): self._parse_inputs_file('orion2.ini')
+
+        orion2_ini_file_exists = False
+        dir_name = os.path.dirname(os.path.abspath(self.fullplotdir))
+        orion2_ini_filename = os.path.join(dir_name, "orion2.ini")
+        orion2_ini_file_exists = os.path.isfile(orion2_ini_filename)
+
+        if orion2_ini_file_exists: self._parse_inputs_file('orion2.ini')
         self.unique_identifier = \
                                int(os.stat(self.parameter_filename)[ST_CTIME])
-        self.domain_left_edge = self.__calc_left_edge()
-        self.domain_right_edge = self.__calc_right_edge()
-        self.domain_dimensions = self.__calc_domain_dimensions()
         self.dimensionality = 3
+        self.domain_left_edge = self._calc_left_edge()
+        self.domain_right_edge = self._calc_right_edge()
+        self.domain_dimensions = self._calc_domain_dimensions()
         self.refine_by = self._handle['/level_0'].attrs['ref_ratio']
         self.periodicity = (True, True, True)
 
@@ -261,57 +447,33 @@
                 param, sep, vals = map(rstrip,line.partition(' '))
             except ValueError:
                 mylog.error("ValueError: '%s'", line)
-            if chombo2enzoDict.has_key(param):
-                paramName = chombo2enzoDict[param]
-                t = map(parameterDict[paramName], vals.split())
-                if len(t) == 1:
-                    if paramName == "GAMMA":
-                        self.gamma = t[0]
-                    else:
-                        self.parameters[paramName] = t[0]
-                else:
-                    if paramName == "RefineBy":
-                        self.parameters[paramName] = t[0]
-                    else:
-                        self.parameters[paramName] = t
-
-    def __calc_left_edge(self):
-        fileh = self._handle
-        dx0 = fileh['/level_0'].attrs['dx']
-        LE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
-        return LE
-
-    def __calc_right_edge(self):
-        fileh = h5py.File(self.parameter_filename,'r')
-        dx0 = fileh['/level_0'].attrs['dx']
-        RE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
-        fileh.close()
-        return RE
-
-    def __calc_domain_dimensions(self):
-        fileh = self._handle
-        L_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
-        R_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
-        return R_index - L_index
+            if param == "GAMMA":
+                self.gamma = vals
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
-        if not os.path.isfile('pluto.ini'):
+
+        pluto_ini_file_exists  = False
+        orion2_ini_file_exists = False
+
+        if type(args[0]) == type(""):
+            dir_name = os.path.dirname(os.path.abspath(args[0]))
+            pluto_ini_filename = os.path.join(dir_name, "pluto.ini")
+            orion2_ini_filename = os.path.join(dir_name, "orion2.ini")
+            pluto_ini_file_exists = os.path.isfile(pluto_ini_filename)
+            orion2_ini_file_exists = os.path.isfile(orion2_ini_filename)
+        
+        if orion2_ini_file_exists:
+            return True
+
+        if not pluto_ini_file_exists:
             try:
                 fileh = h5py.File(args[0],'r')
                 valid = "Chombo_global" in fileh["/"]
+                valid = valid and ('CeilVA_mass' in fileh.attrs.keys())
                 fileh.close()
                 return valid
             except:
                 pass
         return False
 
-    @parallel_root_only
-    def print_key_parameters(self):
-        for a in ["current_time", "domain_dimensions", "domain_left_edge",
-                  "domain_right_edge"]:
-            if not hasattr(self, a):
-                mylog.error("Missing %s in parameter file definition!", a)
-                continue
-            v = getattr(self, a)
-            mylog.info("Parameters: %-25s = %s", a, v)
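
The grid objects above no longer hold direct references to their parents
and children; ChomboGrid keeps integer id lists (_parent_id, _children_ids)
and resolves them to grid objects on demand through the Parent/Children
properties, with the links filled in by the two-pass
_reconstruct_parent_child. A standalone sketch of the pattern (the
Grid/Index names here are illustrative, not yt's):

    import numpy as np

    class Grid:
        _id_offset = 0

        def __init__(self, gid, index):
            self.id = gid
            self.index = index        # owning index, used for lookups
            self._parent_id = []      # store ids, not grid objects
            self._children_ids = []

        @property
        def Parent(self):
            # as in the diff: root grids report None
            if len(self._parent_id) == 0:
                return None
            return [self.index.grids[pid - self._id_offset]
                    for pid in self._parent_id]

        @property
        def Children(self):
            return [self.index.grids[cid - self._id_offset]
                    for cid in self._children_ids]

    class Index:
        def __init__(self, n):
            self.grids = np.empty(n, dtype="object")
            for i in range(n):
                self.grids[i] = Grid(i, self)

    idx = Index(3)
    idx.grids[0]._children_ids = [1, 2]   # first pass: record children
    for child in idx.grids[0].Children:   # second pass: record parents
        child._parent_id.append(0 + Grid._id_offset)
    assert idx.grids[1].Parent[0] is idx.grids[0]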

diff -r 43b6fc9b1094fa717396ea93c51fc9d9764044b9 -r 7de110e64df8ee328ab22f5977a907d994b6087c yt/frontends/chombo/definitions.py
--- a/yt/frontends/chombo/definitions.py
+++ b/yt/frontends/chombo/definitions.py
@@ -13,42 +13,4 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-parameterDict = {"CosmologyCurrentRedshift": float,
-                 "CosmologyComovingBoxSize": float,
-                 "CosmologyOmegaMatterNow": float,
-                 "CosmologyOmegaLambdaNow": float,
-                 "CosmologyHubbleConstantNow": float,
-                 "CosmologyInitialRedshift": float,
-                 "DualEnergyFormalismEta1": float,
-                 "DualEnergyFormalismEta2": float,
-                 "MetaDataString": str,
-                 "HydroMethod": int,
-                 "DualEnergyFormalism": int,
-                 "InitialTime": float,
-                 "ComovingCoordinates": int,
-                 "DensityUnits": float,
-                 "LengthUnits": float,
-                 "LengthUnit": float,
-                 "TemperatureUnits": float,
-                 "TimeUnits": float,
-                 "GravitationalConstant": float,
-                 "Gamma": float,
-                 "MultiSpecies": int,
-                 "CompilerPrecision": str,
-                 "CurrentTimeIdentifier": int,
-                 "RefineBy": int,
-                 "BoundaryConditionName": str,
-                 "TopGridRank": int,
-                 "TopGridDimensions": int,
-                 "EOSSoundSpeed": float,
-                 "EOSType": int,
-                 "NumberOfParticleAttributes": int,
-                                 }
 
-chombo2enzoDict = {"GAMMA": "Gamma",
-                  "Ref_ratio": "RefineBy"
-                                    }
-
-yt2chomboFieldsDict = {}
-chombo2ytFieldsDict = {}
-

diff -r 43b6fc9b1094fa717396ea93c51fc9d9764044b9 -r 7de110e64df8ee328ab22f5977a907d994b6087c yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -28,9 +28,15 @@
 mom_units = "code_mass / (code_time * code_length**2)"
 eden_units = "code_mass / (code_time**2 * code_length)" # erg / cm^3
 
+# Chombo does not have any known fields by itself.
+class ChomboFieldInfo(FieldInfoContainer):
+    known_other_fields = ()
+    known_particle_fields = ()
+
+# Orion 2 Fields
 # We duplicate everything here from Boxlib, because we want to be able to
 # subclass it and that can be somewhat tricky.
-class ChomboFieldInfo(FieldInfoContainer):
+class Orion2FieldInfo(ChomboFieldInfo):
     known_other_fields = (
         ("density", (rho_units, ["density"], None)),
         ("energy-density", (eden_units, ["energy_density"], None)),

diff -r 43b6fc9b1094fa717396ea93c51fc9d9764044b9 -r 7de110e64df8ee328ab22f5977a907d994b6087c yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -27,7 +27,8 @@
     _data_string = 'data:datatype=0'
 
     def __init__(self, pf, *args, **kwargs):
-        BaseIOHandler.__init__(self, pf)
+        BaseIOHandler.__init__(self, pf, *args, **kwargs)
+        self.pf = pf
         self._handle = pf._handle
 
     _field_dict = None
@@ -35,16 +36,29 @@
     def field_dict(self):
         if self._field_dict is not None:
             return self._field_dict
-        ncomp = int(self._handle['/'].attrs['num_components'])
-        temp =  self._handle['/'].attrs.items()[-ncomp:]
-        val, keys = zip(*temp)
-        val = [int(re.match('component_(\d+)',v).groups()[0]) for v in val]
-        self._field_dict = dict(zip(keys,val))
+        field_dict = {}
+        for key, val in self._handle['/'].attrs.items():
+            if key.startswith('component_'):
+                comp_number = int(re.match('component_(\d+)', key).groups()[0])
+                field_dict[val] = comp_number
+        self._field_dict = field_dict
         return self._field_dict
+
+    _particle_field_index = None
+    @property
+    def particle_field_index(self):
+        if self._particle_field_index is not None:
+            return self._particle_field_index
+        field_dict = {}
+        for key, val in self._handle['/'].attrs.items():
+            if key.startswith('particle_'):
+                comp_number = int(re.match('particle_component_(\d+)', key).groups()[0])
+                field_dict[val] = comp_number
+        self._particle_field_index = field_dict
+        return self._particle_field_index        
         
     def _read_field_names(self,grid):
         ncomp = int(self._handle['/'].attrs['num_components'])
-
         fns = [c[1] for c in f['/'].attrs.items()[-ncomp-1:-1]]
     
     def _read_data(self,grid,field):
@@ -90,6 +104,7 @@
         ng = sum(len(c.objs) for c in chunks)
         mylog.debug("Reading %s cells of %s fields in %s grids",
                    size, [f2 for f1, f2 in fields], ng)
+
         ind = 0
         for chunk in chunks:
             for g in chunk.objs:
@@ -107,6 +122,80 @@
                 ind += nd
         return rv
 
+    def _read_particle_selection(self, chunks, selector, fields):
+        rv = {}
+        chunks = list(chunks)
+
+        if selector.__class__.__name__ == "GridSelector":
+
+            if not (len(chunks) == len(chunks[0].objs) == 1):
+                raise RuntimeError
+
+            grid = chunks[0].objs[0]
+
+            for ftype, fname in fields:
+                rv[ftype, fname] = self._read_particles(grid, fname)
+
+            return rv
+
+        rv = {f:np.array([]) for f in fields}
+        for chunk in chunks:
+            for grid in chunk.objs:
+                for ftype, fname in fields:
+                    data = self._read_particles(grid, fname)
+                    rv[ftype, fname] = np.concatenate((data, rv[ftype, fname]))
+        return rv
+
+    def _read_particles(self, grid, name):
+
+        field_index = self.particle_field_index[name]
+        lev = 'level_%s' % grid.Level
+
+        particles_per_grid = self._handle[lev]['particles:offsets'].value
+        items_per_particle = len(self._particle_field_index)
+
+        # compute global offset position
+        offsets = items_per_particle * np.cumsum(particles_per_grid)
+        offsets = np.append(np.array([0]), offsets)
+        offsets = np.array(offsets, dtype=np.int64)
+
+        # convert between the global grid id and the id on this level            
+        grid_levels = np.array([g.Level for g in self.pf.index.grids])
+        grid_ids    = np.array([g.id    for g in self.pf.index.grids])
+        grid_level_offset = grid_ids[np.where(grid_levels == grid.Level)[0][0]]
+        lo = grid.id - grid_level_offset
+        hi = lo + 1
+
+        # handle the case where this grid has no particles
+        if (offsets[lo] == offsets[hi]):
+            return np.array([], dtype=np.float64)
+
+        data = self._handle[lev]['particles:data'][offsets[lo]:offsets[hi]]
+        return data[field_index::items_per_particle]
+
+class IOHandlerChombo2DHDF5(IOHandlerChomboHDF5):
+    _dataset_type = "chombo2d_hdf5"
+    _offset_string = 'data:offsets=0'
+    _data_string = 'data:datatype=0'
+
+    def __init__(self, pf, *args, **kwargs):
+        BaseIOHandler.__init__(self, pf, *args, **kwargs)
+        self.pf = pf
+        self._handle = pf._handle
+
+class IOHandlerChombo1DHDF5(IOHandlerChomboHDF5):
+    _dataset_type = "chombo1d_hdf5"
+    _offset_string = 'data:offsets=0'
+    _data_string = 'data:datatype=0'
+
+    def __init__(self, pf, *args, **kwargs):
+        BaseIOHandler.__init__(self, pf, *args, **kwargs)
+        self.pf = pf
+        self._handle = pf._handle   
+
+class IOHandlerOrion2HDF5(IOHandlerChomboHDF5):
+    _dataset_type = "orion_chombo_native"
+
     def _read_particles(self, grid, field):
         """
         parses the Orion Star Particle text files
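
The particle reader above relies on the flat layout of 'particles:data':
each level stores items_per_particle values per particle, contiguous by
grid, and the cumulative sum of 'particles:offsets' gives each grid's
slice. A small numeric sketch of the indexing arithmetic with synthetic
data (no file access; the values are made up):

    import numpy as np

    items_per_particle = 3                      # values stored per particle
    particles_per_grid = np.array([2, 0, 4])    # counts on one level

    # global offsets into the flat data array, as in _read_particles
    offsets = items_per_particle * np.cumsum(particles_per_grid)
    offsets = np.append([0], offsets).astype(np.int64)
    # offsets -> [0, 6, 6, 18]; grid 1 is empty (offsets[1] == offsets[2])

    data = np.arange(18, dtype=np.float64)      # stands in for particles:data
    lo, hi = 2, 3                               # grid 2's local id, and id + 1
    chunk = data[offsets[lo]:offsets[hi]]       # every value for grid 2

    field_index = 1                             # which component to extract
    print(chunk[field_index::items_per_particle])   # [ 7. 10. 13. 16.]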

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


