[Yt-svn] yt-commit r1379 - in trunk: doc tests yt/extensions yt/lagos yt/lagos/hop

mturk at wrangler.dreamhost.com
Fri Jul 17 15:44:28 PDT 2009


Author: mturk
Date: Fri Jul 17 15:44:26 2009
New Revision: 1379
URL: http://yt.spacepope.org/changeset/1379

Log:
Dumping changes from the Mercurial repository back into the main trunk.

Several changes to the yt internals:

    * New integer-math covering grid with a different API; the old covering grid has been renamed float_covering_grid.  (Smoothed covering grids remain exclusively floating point.)  A sketch of the new call signature follows below.
    * Data objects now register themselves and are automatically added to the hierarchy.
    * data_style is now always a descriptive string rather than a magic integer.  (The form of data_style will be changing again soon.)
    * Non-orthogonal rays now include a 't' field giving the parametric position of each intersection along the ray.
    * RTIntegrator is now compiled by default.
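
For illustration, a minimal sketch of the new call signature (not part of the diffs below); it assumes `pf` is an already-loaded parameter file, and level 2 and 64^3 dimensions are placeholder values:

    # New integer-math covering grid: only left_edge and dims are passed;
    # right_edge is derived internally as left_edge + dims * dds.
    cg  = pf.h.covering_grid(2, [0.0]*3, [64, 64, 64])

    # The old floating-point object survives under its new name and still
    # takes an explicit right_edge.
    fcg = pf.h.float_covering_grid(2, [0.0]*3, [1.0]*3, [64, 64, 64])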



Modified:
   trunk/doc/install_script.sh
   trunk/doc/install_script_osx.sh
   trunk/tests/test_lagos.py
   trunk/yt/extensions/EnzoSimulation.py
   trunk/yt/lagos/BaseDataTypes.py
   trunk/yt/lagos/BaseGridType.py
   trunk/yt/lagos/HierarchyType.py
   trunk/yt/lagos/OutputTypes.py
   trunk/yt/lagos/PointCombine.c
   trunk/yt/lagos/RTIntegrator.pyx
   trunk/yt/lagos/__init__.py
   trunk/yt/lagos/hop/hop_numpy.h
   trunk/yt/lagos/setup.py

Modified: trunk/doc/install_script.sh
==============================================================================
--- trunk/doc/install_script.sh	(original)
+++ trunk/doc/install_script.sh	Fri Jul 17 15:44:26 2009
@@ -28,11 +28,12 @@
 # If you absolutely can't get the fortran to work, try this:
 #NUMPY_ARGS="--fcompiler=fake"
 
-INST_WXPYTHON=0 # If you 't want to install wxPython, set this to 1
+INST_WXPYTHON=1 # If you 't want to install wxPython, set this to 1
 INST_ZLIB=1     # On some systems (Kraken) matplotlib has issues with 
                 # the system zlib, which is compiled statically.
                 # If need be, you can turn this off.
-INST_HG=0       # Install Mercurial or not?
+INST_TRAITS=0   # Experimental TraitsUI installation
+INST_HG=1       # Install Mercurial or not?
 
 # If you've got YT some other place, set this to point to it.
 YT_DIR=""
@@ -125,6 +126,10 @@
 get_willwont ${INST_HG}
 echo "be installing Mercurial"
 
+printf "%-15s = %s so I " "INST_TRAITS" "${INST_TRAITS}"
+get_willwont ${INST_TRAITS}
+echo "be installing Traits"
+
 echo
 
 if [ -z "$HDF5_DIR" ]
@@ -208,7 +213,7 @@
 fi
 
 [ $INST_ZLIB -eq 1 ] && get_enzotools zlib-1.2.3.tar.bz2 
-[ $INST_WXPYTHON -eq 1 ] && get_enzotools wxPython-src-2.8.7.1.tar.bz2
+[ $INST_WXPYTHON -eq 1 ] && get_enzotools wxPython-src-2.8.9.1.tar.bz2
 get_enzotools Python-2.6.1.tgz
 get_enzotools numpy-1.2.1.tar.gz
 get_enzotools matplotlib-0.98.5.2.tar.gz
@@ -282,11 +287,11 @@
 
 export PYTHONPATH=${DEST_DIR}/lib/python2.6/site-packages/
 
-if [ $INST_WXPYTHON -eq 1 ] && [ ! -e wxPython-src-2.8.7.1/done ]
+if [ $INST_WXPYTHON -eq 1 ] && [ ! -e wxPython-src-2.8.9.1/done ]
 then
     echo "Installing wxPython.  This may take a while, but don't worry.  YT loves you."
-    [ ! -e wxPython-src-2.8.7.1 ] && tar xfj wxPython-src-2.8.7.1.tar.bz2
-    cd wxPython-src-2.8.7.1
+    [ ! -e wxPython-src-2.8.9.1 ] && tar xfj wxPython-src-2.8.9.1.tar.bz2
+    cd wxPython-src-2.8.9.1
 
     ( ./configure --prefix=${DEST_DIR}/ --with-opengl 2>&1 ) 1>> ${LOG_FILE} || do_exit
     ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -334,6 +339,12 @@
     ( ${DEST_DIR}/bin/easy_install-2.6 mercurial 2>&1 ) 1>> ${LOG_FILE} || do_exit
 fi
 
+if [ $INST_WXPYTHON -eq 1 ] && [ $INST_TRAITS -eq 1 ]
+then
+    echo "Installing Traits"
+    ( ${DEST_DIR}/bin/easy_install-2.6 -U TraitsGUI TraitsBackendWX 2>&1 ) 1>> ${LOG_FILE} || do_exit
+fi
+
 echo
 echo
 echo "========================================================================"

Modified: trunk/doc/install_script_osx.sh
==============================================================================
--- trunk/doc/install_script_osx.sh	(original)
+++ trunk/doc/install_script_osx.sh	Fri Jul 17 15:44:26 2009
@@ -23,7 +23,7 @@
 # and install it on its own
 #HDF5_DIR=
 
-INST_HG=0       # Install Mercurial or not?
+INST_HG=1       # Install Mercurial or not?
 # If you've got YT some other place, set this to point to it.
 YT_DIR=""
 

Modified: trunk/tests/test_lagos.py
==============================================================================
--- trunk/tests/test_lagos.py	(original)
+++ trunk/tests/test_lagos.py	Fri Jul 17 15:44:26 2009
@@ -319,6 +319,8 @@
         LagosTestingBase.setUp(self)
 
     def testNoGhost(self):
+        DW = self.OutputFile["DomainRightEdge"] \
+           - self.OutputFile["DomainLeftEdge"]
         for g in self.hierarchy.grids:
             cube = g.retrieve_ghost_zones(0, "Density")
             self.assertTrue(na.all(cube["Density"] == g["Density"]))
@@ -326,6 +328,14 @@
             cube.flush_data(field="Density")
             self.assertTrue(na.all(g["Density"] == cube["Density"]))
 
+    def testOffsetDomain(self):
+        DW = self.OutputFile["DomainRightEdge"] \
+           - self.OutputFile["DomainLeftEdge"]
+        for g in self.hierarchy.grids:
+            cube = self.hierarchy.covering_grid(g.Level,
+                g.LeftEdge+DW, g.ActiveDimensions)
+            self.assertTrue(na.all(g["Density"] == cube["Density"]))
+
     def testTwoGhost(self):
         for g in self.hierarchy.grids:
             cube = g.retrieve_ghost_zones(2, "Density")
@@ -342,7 +352,7 @@
     
     def testFlushBackToGrids(self):
         ml = self.hierarchy.max_level
-        cg = self.hierarchy.covering_grid(3, [0.0]*3, [1.0]*3, [64,64,64])
+        cg = self.hierarchy.covering_grid(2, [0.0]*3, [64,64,64])
         cg["Ones"] *= 2.0
         cg.flush_data(field="Ones")
         for g in na.concatenate([self.hierarchy.select_grids(i) for i in range(3)]):
@@ -351,15 +361,15 @@
 
     def testFlushBackToNewCover(self):
         ml = self.hierarchy.max_level
-        cg = self.hierarchy.covering_grid(3, [0.0]*3, [1.0]*3, [64,64,64])
+        cg = self.hierarchy.covering_grid(2, [0.0]*3, [64,64,64])
         cg["tempContours"] = cg["Ones"] * 2.0
         cg.flush_data(field="tempContours")
-        cg2 = self.hierarchy.covering_grid(3, [0.0]*3, [1.0]*3, [64,64,64])
+        cg2 = self.hierarchy.covering_grid(2, [0.0]*3, [64,64,64])
         self.assertTrue(na.all(cg["tempContours"] == cg2["tempContours"]))
 
     def testRawFlushBack(self):
         ml = self.hierarchy.max_level
-        cg = self.hierarchy.covering_grid(3, [0.0]*3, [1.0]*3, [64,64,64])
+        cg = self.hierarchy.covering_grid(2, [0.0]*3, [64,64,64])
         cg["DensityNew"] = cg["Density"] * 2.111
         cg.flush_data(field="DensityNew")
         for g in na.concatenate([self.hierarchy.select_grids(i) for i in range(3)]):
@@ -372,14 +382,16 @@
             self.assertAlmostEqual(max_diff, 2.111, 5)
 
     def testAllCover(self):
-        cg = self.hierarchy.covering_grid(0, [0.0]*3, [1.0]*3, [32,32,32])
-        self.assertTrue(cg["Density"].max() \
-                     == self.hierarchy.grids[0]["Density"].max())
-        self.assertTrue(cg["Density"].min() \
-                     == self.hierarchy.grids[0]["Density"].min())
+        cg = self.hierarchy.covering_grid(1, [0.0]*3, [32,32,32])
+        mi, ma = 1e30, -1e30
+        for g in na.concatenate([self.hierarchy.select_grids(i) for i in range(2)]):
+            ma = max(ma, g["Density"].max())
+            mi = min(mi, g["Density"].min())
+        self.assertEqual(cg["Density"].max(), ma)
+        self.assertEqual(cg["Density"].min(), mi)
 
     def testCellVolume(self):
-        cg = self.hierarchy.covering_grid(2, [0.0]*3, [1.0]*3, [64,64,64])
+        cg = self.hierarchy.covering_grid(2, [0.0]*3, [64,64,64])
         self.assertEqual(na.unique(cg["CellVolume"]).size, 1)
 
 class TestDiskDataType(Data3DBase, DataTypeTestingBase, LagosTestingBase, unittest.TestCase):
@@ -432,7 +444,7 @@
 class TestSphereDataType(Data3DBase, DataTypeTestingBase, LagosTestingBase, unittest.TestCase):
     def setUp(self):
         DataTypeTestingBase.setUp(self)
-        self.data=self.hierarchy.sphere([0.5,0.5,0.5],2.0)
+        self.data=self.hierarchy.sphere([0.5,0.5,0.5],1.0)
     def testVolume(self):
         vol = self.data["CellVolume"].sum() / self.data.convert("cm")**3.0
         self.assertAlmostEqual(vol,1.0,7)

Modified: trunk/yt/extensions/EnzoSimulation.py
==============================================================================
--- trunk/yt/extensions/EnzoSimulation.py	(original)
+++ trunk/yt/extensions/EnzoSimulation.py	Fri Jul 17 15:44:26 2009
@@ -159,7 +159,7 @@
             try:
                 param, vals = map(str,line.split("="))
                 param = param.strip()
-                vals = vals.strip()
+                vals = vals.strip().split("#", 1)[0].split("//", 1)[0]
             except ValueError:
                 continue
             if EnzoParameterDict.has_key(param):
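
The new value-parsing line above discards trailing '#' or '//' comments before a parameter value is interpreted.  A standalone sketch of the effect, illustrative only and using a made-up parameter line:

    line = "CosmologyOutputRedshift[3] = 2.0   // third output"
    param, vals = map(str, line.split("="))
    param = param.strip()
    vals = vals.strip().split("#", 1)[0].split("//", 1)[0]
    print param, "->", repr(vals.strip())   # CosmologyOutputRedshift[3] -> '2.0'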

Modified: trunk/yt/lagos/BaseDataTypes.py
==============================================================================
--- trunk/yt/lagos/BaseDataTypes.py	(original)
+++ trunk/yt/lagos/BaseDataTypes.py	Fri Jul 17 15:44:26 2009
@@ -25,6 +25,8 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+data_object_registry = {}
+
 from yt.lagos import *
 
 def restore_grid_state(func):
@@ -99,7 +101,7 @@
         else: tr = self.data[field]
         return tr
 
-class AMRData:
+class AMRData(object):
     """
     Generic AMRData container.  By itself, will attempt to
     generate field, read fields (method defined by derived classes)
@@ -108,6 +110,13 @@
     _grids = None
     _num_ghost_zones = 0
     _con_args = ()
+    _skip_add = False
+
+    class __metaclass__(type):
+        def __init__(cls, name, b, d):
+            type.__init__(cls, name, b, d)
+            if hasattr(cls, "_type_name") and not cls._skip_add:
+                data_object_registry[cls._type_name] = cls
 
     def __init__(self, pf, fields, **kwargs):
         """
@@ -119,6 +128,8 @@
         if pf != None:
             self.pf = pf
             self.hierarchy = pf.hierarchy
+        self.hierarchy.objects.append(weakref.proxy(self))
+        mylog.debug("Appending object to %s", self.pf)
         if fields == None: fields = []
         self.fields = ensure_list(fields)[:]
         self.data = {}
@@ -376,7 +387,7 @@
                 continue
             mylog.info("Getting field %s from %s", field, len(self._grids))
             if field not in self.hierarchy.field_list and not in_grids:
-                if field != "dts" and self._generate_field(field):
+                if field not in ("dts", "t") and self._generate_field(field):
                     continue # True means we already assigned it
             self[field] = na.concatenate(
                 [self._get_data_from_grid(grid, field)
@@ -445,7 +456,7 @@
         #self.vec /= na.sqrt(na.dot(self.vec, self.vec))
         self.center = self.start_point
         self.set_field_parameter('center', self.start_point)
-        self._dts = {}
+        self._dts, self._ts = {}, {}
         #self._refresh_data()
 
     def _get_list_of_grids(self):
@@ -479,16 +490,19 @@
         mask = na.logical_and(self._get_cut_mask(grid),
                               grid.child_mask)
         if field == 'dts': return self._dts[grid.id][mask]
+        if field == 't': return self._ts[grid.id][mask]
         return grid[field][mask]
         
     @cache_mask
     def _get_cut_mask(self, grid):
         mask = na.zeros(grid.ActiveDimensions, dtype='int')
         dts = na.zeros(grid.ActiveDimensions, dtype='float64')
+        ts = na.zeros(grid.ActiveDimensions, dtype='float64')
         import RTIntegrator as RT
-        RT.VoxelTraversal(mask, dts, grid.LeftEdge, grid.RightEdge,
+        RT.VoxelTraversal(mask, ts, dts, grid.LeftEdge, grid.RightEdge,
                           grid.dds, self.center, self.vec)
         self._dts[grid.id] = na.abs(dts)
+        self._ts[grid.id] = na.abs(ts)
         return mask
 
 class AMR2DData(AMRData, GridPropertiesMixin, ParallelAnalysisInterface):
@@ -1219,6 +1233,99 @@
         return  "%s/%s" % \
             (self._top_node, self.axis)
 
+class AMRFixedResProjectionBase(AMR2DData):
+    _top_node = "/Projections"
+    _type_name = "fixed_res_proj"
+    _con_args = ('axis', 'field', 'weight_field')
+    def __init__(self, axis, level, left_edge, dims,
+                 fields = None, pf=None, **kwargs):
+        """
+        A projection that provides fixed resolution output,
+        operating in a grid-by-grid fashion.
+        """
+        AMR2DData.__init__(self, axis, fields, pf, **kwargs)
+        self.left_edge = na.array(left_edge)
+        self.level = level
+        self.dds = self.pf.h.select_grids(self.level)[0].dds.copy()
+        self.dims = na.array([dims]*2)
+        self.ActiveDimensions = na.array([dims]*3, dtype='int32')
+        self.right_edge = self.left_edge + self.ActiveDimensions*self.dds
+        self.global_startindex = na.rint(self.left_edge/self.dds).astype('int64')
+        self._dls = {}
+        self.domain_width = na.rint((self.pf["DomainRightEdge"] -
+                    self.pf["DomainLeftEdge"])/self.dds).astype('int64')
+
+    def _get_list_of_grids(self):
+        if self._grids is not None: return
+        if na.any(self.left_edge < self.pf["DomainLeftEdge"]) or \
+           na.any(self.right_edge > self.pf["DomainRightEdge"]):
+            grids,ind = self.pf.hierarchy.get_periodic_box_grids(
+                            self.left_edge, self.right_edge)
+            ind = slice(None)
+        else:
+            grids,ind = self.pf.hierarchy.get_box_grids(
+                            self.left_edge, self.right_edge)
+        level_ind = (self.pf.hierarchy.gridLevels.ravel()[ind] <= self.level)
+        sort_ind = na.argsort(self.pf.h.gridLevels.ravel()[ind][level_ind])
+        self._grids = self.pf.hierarchy.grids[ind][level_ind][(sort_ind,)][::-1]
+
+    def _generate_coords(self):
+        xi, yi, zi = self.left_edge + self.dds*0.5
+        xf, yf, zf = self.left_edge + self.dds*(self.ActiveDimensions-0.5)
+        coords = na.mgrid[xi:xf:self.ActiveDimensions[0]*1j,
+                          yi:yf:self.ActiveDimensions[1]*1j,
+                          zi:zf:self.ActiveDimensions[2]*1j]
+        xax = x_dict[self.axis]
+        yax = y_dict[self.axis]
+        self['px'] = coords[xax]
+        self['py'] = coords[yax]
+        self['pdx'] = self.dds[xax]
+        self['pdy'] = self.dds[yax]
+
+    #@time_execution
+    def get_data(self, fields = None):
+        """
+        Iterates over the list of fields and generates/reads them all.
+        """
+        self._get_list_of_grids()
+        if not self.has_key('pdx'):
+            self._generate_coords()
+        if fields == None:
+            fields_to_get = self.fields[:]
+        else:
+            fields_to_get = ensure_list(fields)
+        temp_data = {}
+        for field in fields_to_get:
+            self[field] = na.zeros(self.dims, dtype='float64')
+        dls = self.__setup_dls(fields_to_get)
+        for grid in self._get_grids():
+            self._get_data_from_grid(grid, fields_to_get, dls)
+        for field in fields_to_get:
+            self[field] = self._mpi_allsum(self[field])
+            conv = self.pf.units[self.pf.field_info[field].projection_conversion]
+            self[field] *= conv
+
+    def __setup_dls(self, fields):
+        dls = {}
+        for level in range(self.level+1):
+            dls[level] = []
+            grid = self.select_grids(level)[0]
+            for field in fields:
+                if field is None: continue
+                dls[level].append(float(just_one(grid['d%s' % axis_names[self.axis]])))
+        return dls
+
+    def _get_data_from_grid(self, grid, fields, dls):
+        g_fields = [grid[field] for field in fields]
+        c_fields = [self[field] for field in fields]
+        ref_ratio = self.pf["RefineBy"]**(self.level - grid.Level)
+        PointCombine.FillBuffer(ref_ratio,
+            grid.get_global_startindex(), self.global_startindex,
+            c_fields, g_fields, 
+            self.ActiveDimensions, grid.ActiveDimensions,
+            grid.child_mask, self.domain_width, dls[grid.Level],
+            self.axis)
+
 class AMR3DData(AMRData, GridPropertiesMixin):
     _key_fields = ['x','y','z','dx','dy','dz']
     """
@@ -1683,6 +1790,7 @@
     """
     AMRRegion without any dx padding for cell selection
     """
+    _type_name = "region_strict"
     _dx_pad = 0.0
 
 class AMRPeriodicRegionBase(AMR3DData):
@@ -1744,9 +1852,10 @@
     """
     AMRPeriodicRegion without any dx padding for cell selection
     """
+    _type_name = "periodic_region_strict"
     _dx_pad = 0.0
 
-class AMRGridCollection(AMR3DData):
+class AMRGridCollectionBase(AMR3DData):
     """
     An arbitrary selection of grids, within which we accept all points.
     """
@@ -1802,7 +1911,7 @@
         # Now we sort by level
         grids = grids.tolist()
         grids.sort(key=lambda x: (x.Level, x.LeftEdge[0], x.LeftEdge[1], x.LeftEdge[2]))
-        self._grids = na.array(grids)
+        self._grids = na.array(grids, dtype='object')
 
     def _is_fully_enclosed(self, grid):
         r = na.abs(grid._corners - self.center)
@@ -1824,7 +1933,7 @@
             self._cut_masks[grid.id] = cm
         return cm
 
-class AMRCoveringGridBase(AMR3DData):
+class AMRFloatCoveringGridBase(AMR3DData):
     """
     Covering grids represent fixed-resolution data over a given region.
     In order to achieve this goal -- for instance in order to obtain ghost
@@ -1833,7 +1942,7 @@
     scales) on the input data.
     """
     _spatial = True
-    _type_name = "covering_grid"
+    _type_name = "float_covering_grid"
     _con_args = ('level', 'left_edge', 'right_edge', 'ActiveDimensions')
     def __init__(self, level, left_edge, right_edge, dims, fields = None,
                  pf = None, num_ghost_zones = 0, use_pbar = True, **kwargs):
@@ -1973,7 +2082,7 @@
     def RightEdge(self):
         return self.right_edge
 
-class AMRSmoothedCoveringGridBase(AMRCoveringGridBase):
+class AMRSmoothedCoveringGridBase(AMRFloatCoveringGridBase):
     _type_name = "smoothed_covering_grid"
     def __init__(self, *args, **kwargs):
         dlog2 = na.log10(kwargs['dims'])/na.log10(2)
@@ -1982,7 +2091,7 @@
             #mylog.warning("Must be power of two dimensions")
             #raise ValueError
         kwargs['num_ghost_zones'] = 0
-        AMRCoveringGridBase.__init__(self, *args, **kwargs)
+        AMRFloatCoveringGridBase.__init__(self, *args, **kwargs)
 
     def _get_list_of_grids(self):
         if na.any(self.left_edge - self.dds < self.pf["DomainLeftEdge"]) or \
@@ -2078,19 +2187,149 @@
     def flush_data(self, *args, **kwargs):
         raise KeyError("Can't do this")
 
+class AMRCoveringGridBase(AMR3DData):
+    _spatial = True
+    _type_name = "covering_grid"
+    _con_args = ('level', 'left_edge', 'right_edge', 'ActiveDimensions')
+    def __init__(self, level, left_edge, dims, fields = None,
+                 pf = None, num_ghost_zones = 0, use_pbar = True, **kwargs):
+        AMR3DData.__init__(self, center=None, fields=fields, pf=pf, **kwargs)
+        self.left_edge = na.array(left_edge)
+        self.level = level
+        self.dds = self.pf.h.select_grids(self.level)[0].dds.copy()
+        self.ActiveDimensions = na.array(dims)
+        self.right_edge = self.left_edge + self.ActiveDimensions*self.dds
+        self._num_ghost_zones = num_ghost_zones
+        self._use_pbar = use_pbar
+        self.global_startindex = na.rint(self.left_edge/self.dds).astype('int64')
+        self.domain_width = na.rint((self.pf["DomainRightEdge"] -
+                    self.pf["DomainLeftEdge"])/self.dds).astype('int64')
+        self._refresh_data()
+
+    def _get_list_of_grids(self, buffer = 0.0):
+        if self._grids is not None: return
+        if na.any(self.left_edge - buffer < self.pf["DomainLeftEdge"]) or \
+           na.any(self.right_edge + buffer > self.pf["DomainRightEdge"]):
+            grids,ind = self.pf.hierarchy.get_periodic_box_grids(
+                            self.left_edge - buffer,
+                            self.right_edge + buffer)
+            ind = slice(None)
+        else:
+            grids,ind = self.pf.hierarchy.get_box_grids(
+                            self.left_edge - buffer,
+                            self.right_edge + buffer)
+        level_ind = (self.pf.hierarchy.gridLevels.ravel()[ind] <= self.level)
+        sort_ind = na.argsort(self.pf.h.gridLevels.ravel()[ind][level_ind])
+        self._grids = self.pf.hierarchy.grids[ind][level_ind][(sort_ind,)][::-1]
+
+    def _refresh_data(self):
+        AMR3DData._refresh_data(self)
+        self['dx'] = self.dds[0] * na.ones(self.ActiveDimensions, dtype='float64')
+        self['dy'] = self.dds[1] * na.ones(self.ActiveDimensions, dtype='float64')
+        self['dz'] = self.dds[2] * na.ones(self.ActiveDimensions, dtype='float64')
+
+    def get_data(self, fields=None):
+        if self._grids is None:
+            self._get_list_of_grids()
+        if fields is None:
+            fields = self.fields[:]
+        else:
+            fields = ensure_list(fields)
+        obtain_fields = []
+        for field in fields:
+            if self.data.has_key(field): continue
+            if field not in self.hierarchy.field_list:
+                try:
+                    #print "Generating", field
+                    self._generate_field(field)
+                    continue
+                except NeedsOriginalGrid, ngt_exception:
+                    pass
+            obtain_fields.append(field)
+            self[field] = na.zeros(self.ActiveDimensions, dtype='float64') -999
+        if len(obtain_fields) == 0: return
+        mylog.debug("Getting fields %s from %s possible grids",
+                   obtain_fields, len(self._grids))
+        if self._use_pbar: pbar = \
+                get_pbar('Searching grids for values ', len(self._grids))
+        for i, grid in enumerate(self._grids):
+            if self._use_pbar: pbar.update(i)
+            self._get_data_from_grid(grid, obtain_fields)
+            if not na.any(self[obtain_fields[0]] == -999): break
+        if self._use_pbar: pbar.finish()
+        if na.any(self[obtain_fields[0]] == -999):
+            # and self.dx < self.hierarchy.grids[0].dx:
+            n_bad = na.where(self[obtain_fields[0]]==-999)[0].size
+            mylog.error("Covering problem: %s cells are uncovered", n_bad)
+            raise KeyError(n_bad)
+            
+    def _generate_field(self, field):
+        if self.pf.field_info.has_key(field):
+            # First we check the validator; this might even raise!
+            self.pf.field_info[field].check_available(self)
+            self[field] = self.pf.field_info[field](self)
+        else: # Can't find the field, try as it might
+            raise exceptions.KeyError(field)
+
+    def flush_data(self, field=None):
+        """
+        Any modifications made to the data in this object are pushed back
+        to the originating grids, except the cells where those grids are both
+        below the current level `and` have child cells.
+        """
+        self._get_list_of_grids()
+        # We don't generate coordinates here.
+        if field == None:
+            fields_to_get = self.fields
+        else:
+            fields_to_get = ensure_list(field)
+        for grid in self._grids:
+            self._flush_data_to_grid(grid, fields_to_get)
+
+    @restore_grid_state
+    def _get_data_from_grid(self, grid, fields):
+        ll = int(grid.Level == self.level)
+        ref_ratio = self.pf["RefineBy"]**(self.level - grid.Level)
+        g_fields = [grid[field] for field in fields]
+        c_fields = [self[field] for field in fields]
+        PointCombine.FillRegion(ref_ratio,
+            grid.get_global_startindex(), self.global_startindex,
+            c_fields, g_fields, 
+            self.ActiveDimensions, grid.ActiveDimensions,
+            grid.child_mask, self.domain_width, ll, 0)
+
+    def _flush_data_to_grid(self, grid, fields):
+        ll = int(grid.Level == self.level)
+        ref_ratio = self.pf["RefineBy"]**(self.level - grid.Level)
+        g_fields = []
+        for field in fields:
+            if not grid.has_key(field): grid[field] = \
+               na.zeros(grid.ActiveDimensions, dtype=self[field].dtype)
+            g_fields.append(grid[field])
+        c_fields = [self[field] for field in fields]
+        PointCombine.FillRegion(ref_ratio,
+            grid.get_global_startindex(), self.global_startindex,
+            c_fields, g_fields, 
+            self.ActiveDimensions, grid.ActiveDimensions,
+            grid.child_mask, self.domain_width, ll, 1)
 
-class EnzoOrthoRayBase(AMROrthoRayBase): pass
-class EnzoRayBase(AMRRayBase): pass
-class EnzoSliceBase(AMRSliceBase): pass
-class EnzoCuttingPlaneBase(AMRCuttingPlaneBase): pass
-class EnzoProjBase(AMRProjBase): pass
-class EnzoCylinderBase(AMRCylinderBase): pass
-class EnzoRegionBase(AMRRegionBase): pass
-class EnzoPeriodicRegionBase(AMRPeriodicRegionBase): pass
-class EnzoGridCollection(AMRGridCollection): pass
-class EnzoSphereBase(AMRSphereBase): pass
-class EnzoCoveringGrid(AMRCoveringGridBase): pass
-class EnzoSmoothedCoveringGrid(AMRSmoothedCoveringGridBase): pass
+    @property
+    def LeftEdge(self):
+        return self.left_edge
+
+    @property
+    def RightEdge(self):
+        return self.right_edge
+
+class AMRIntSmoothedCoveringGridBase(AMRCoveringGridBase):
+    _skip_add = True
+    def _get_list_of_grids(self):
+        buffer = self.pf.h.select_grids(0)[0].dds
+        AMRCoveringGridBase._get_list_of_grids(buffer)
+        self._grids = self._grids[::-1]
+
+    def _get_data_from_grid(self, grid, fields):
+        pass
 
 def _reconstruct_object(*args, **kwargs):
     pfid = args[0]
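
The data_object_registry plus the metaclass on AMRData are what let _setup_classes in HierarchyType.py build the hierarchy's object constructors automatically.  A minimal sketch of the mechanism (the class below is hypothetical, not part of yt):

    from yt.lagos.BaseDataTypes import AMR3DData, data_object_registry

    class MyRegionBase(AMR3DData):        # hypothetical user-defined object
        _type_name = "my_region"
        _con_args = ()

    # The metaclass registered it at class-creation time ...
    print "my_region" in data_object_registry        # True
    # ... so after _setup_classes runs it is reachable as pf.h.my_region(...),
    # just like the new built-ins fixed_res_proj and region_strict.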

Modified: trunk/yt/lagos/BaseGridType.py
==============================================================================
--- trunk/yt/lagos/BaseGridType.py	(original)
+++ trunk/yt/lagos/BaseGridType.py	Fri Jul 17 15:44:26 2009
@@ -34,6 +34,7 @@
     _id_offset = 1
 
     _type_name = 'grid'
+    _skip_add = True
     _con_args = ('id', 'filename')
 
     def __init__(self, id, filename=None, hierarchy = None):
@@ -359,9 +360,11 @@
                   'num_ghost_zones':n_zones,
                   'use_pbar':False, 'fields':fields}
         if smoothed:
-            cube = self.hierarchy.smoothed_covering_grid(*args, **kwargs)
+            cube = self.hierarchy.smoothed_covering_grid(
+                level, new_left_edge, new_right_edge, **kwargs)
         else:
-            cube = self.hierarchy.covering_grid(*args, **kwargs)
+            cube = self.hierarchy.covering_grid(
+                level, new_left_edge, **kwargs)
         return cube
 
     def get_vertex_centered_data(self, field, smoothed=True):

Modified: trunk/yt/lagos/HierarchyType.py
==============================================================================
--- trunk/yt/lagos/HierarchyType.py	(original)
+++ trunk/yt/lagos/HierarchyType.py	Fri Jul 17 15:44:26 2009
@@ -30,17 +30,17 @@
 #import yt.enki
 
 _data_style_funcs = \
-   { 4: (readDataHDF4,readAllDataHDF4, getFieldsHDF4, readDataSliceHDF4,
+   { 'enzo_hdf4': (readDataHDF4,readAllDataHDF4, getFieldsHDF4, readDataSliceHDF4,
          getExceptionHDF4, DataQueueHDF4),
      'enzo_hdf4_2d': (readDataHDF4, readAllDataHDF4, getFieldsHDF4, readDataSliceHDF4_2D,
          getExceptionHDF4, DataQueueHDF4_2D),
-     5: (readDataHDF5, readAllDataHDF5, getFieldsHDF5, readDataSliceHDF5,
+     'enzo_hdf5': (readDataHDF5, readAllDataHDF5, getFieldsHDF5, readDataSliceHDF5,
          getExceptionHDF5, DataQueueHDF5),
-     6: (readDataPacked, readAllDataPacked, getFieldsPacked, readDataSlicePacked,
+     'enzo_packed_3d': (readDataPacked, readAllDataPacked, getFieldsPacked, readDataSlicePacked,
          getExceptionHDF5, DataQueuePackedHDF5),
-     7: (readDataNative, readAllDataNative, None, readDataSliceNative,
+     'orion_native': (readDataNative, readAllDataNative, None, readDataSliceNative,
          getExceptionHDF5, DataQueueNative), \
-     8: (readDataInMemory, readAllDataInMemory, getFieldsInMemory, readDataSliceInMemory,
+     'enzo_inline': (readDataInMemory, readAllDataInMemory, getFieldsInMemory, readDataSliceInMemory,
          getExceptionInMemory, DataQueueInMemory),
      'enzo_packed_2d': (readDataPacked, readAllDataPacked, getFieldsPacked, readDataSlicePacked2D,
          getExceptionHDF5, DataQueuePacked2D),
@@ -105,7 +105,7 @@
         self.level_stats['numcells'] = [0 for i in range(MAXLEVEL)]
 
     def __setup_filemap(self, grid):
-        if not self.data_style == 6:
+        if not self.data_style == 'enzo_packed_3d':
             return
         self.cpu_map[grid.filename].append(grid)
 
@@ -240,26 +240,11 @@
 
     def _setup_classes(self, dd):
         self.object_types = []
-        self._add_object_class('proj', "AMRProj", AMRProjBase, dd)
-        self._add_object_class('slice', "AMRSlice", AMRSliceBase, dd)
-        self._add_object_class('region', "AMRRegion", AMRRegionBase, dd)
-        self._add_object_class('region_strict', "AMRRegionStrict",
-                        AMRRegionStrictBase, dd)
-        self._add_object_class('periodic_region', "AMRPeriodicRegion",
-                        AMRPeriodicRegionBase, dd)
-        self._add_object_class('periodic_region_strict', "AMRPeriodicRegionStrict",
-                        AMRPeriodicRegionStrictBase, dd)
-        self._add_object_class('covering_grid', "AMRCoveringGrid",
-                        AMRCoveringGridBase, dd)
-        self._add_object_class('smoothed_covering_grid', "AMRSmoothedCoveringGrid",
-                        AMRSmoothedCoveringGridBase, dd)
-        self._add_object_class('sphere', "AMRSphere", AMRSphereBase, dd)
-        self._add_object_class('cutting', "AMRCuttingPlane", AMRCuttingPlaneBase, dd)
-        self._add_object_class('ray', "AMRRay", AMRRayBase, dd)
-        self._add_object_class('ortho_ray', "AMROrthoRay", AMROrthoRayBase, dd)
-        self._add_object_class('disk', "AMRCylinder", AMRCylinderBase, dd)
-        self._add_object_class('grid_collection', "AMRGridCollection", AMRGridCollection, dd)
-        self._add_object_class('extracted_region', "ExtractedRegion", ExtractedRegionBase, dd)
+        self.objects = []
+        for name, cls in sorted(data_object_registry.items()):
+            cname = cls.__name__
+            if cname.endswith("Base"): cname = cname[:-4]
+            self._add_object_class(name, cname, cls, dd)
         self.object_types.sort()
 
     def all_data(self, find_max=False):
@@ -685,6 +670,9 @@
 
         AMRHierarchy.__init__(self, pf)
 
+        # sync it back
+        self.parameter_file.data_style = self.data_style
+
         del self.__hierarchy_string 
 
     def _setup_classes(self):
@@ -704,16 +692,16 @@
             self._strip_path = True
         try:
             a = SD.SD(testGrid)
-            self.data_style = 4
+            self.data_style = 'enzo_hdf4'
             mylog.debug("Detected HDF4")
         except:
             list_of_sets = HDF5LightReader.ReadListOfDatasets(testGrid, "/")
             if len(list_of_sets) == 0 and rank == 3:
                 mylog.debug("Detected packed HDF5")
-                self.data_style = 6
+                self.data_style = 'enzo_packed_3d'
             elif len(list_of_sets) > 0 and rank == 3:
                 mylog.debug("Detected unpacked HDF5")
-                self.data_style = 5
+                self.data_style = 'enzo_hdf5'
             elif len(list_of_sets) == 0 and rank == 2:
                 mylog.debug("Detect packed 2D")
                 self.data_style = 'enzo_packed_2d'
@@ -724,7 +712,7 @@
                 raise TypeError
 
     def __setup_filemap(self, grid):
-        if not self.data_style == 6:
+        if not self.data_style == 'enzo_packed_3d':
             return
         try:
             self.cpu_map[grid.filename].append(grid)
@@ -869,7 +857,7 @@
         Instantiates all of the grid objects, with their appropriate
         parameters.  This is the work-horse.
         """
-        if self.data_style == 6:
+        if self.data_style == 'enzo_packed_3d':
             self.cpu_map = defaultdict(lambda: [][:])
             self.file_access = {}
         harray = self.get_data("/", "Hierarchy")
@@ -984,7 +972,7 @@
         return self.grids[(random_sample,)]
 
 class EnzoHierarchyInMemory(EnzoHierarchy):
-    _data_style = 8
+    _data_style = 'inline'
     def _obtain_enzo(self):
         import enzo; return enzo
 
@@ -1163,7 +1151,7 @@
     yield buf  # First line.
 
 class OrionHierarchy(AMRHierarchy):
-    def __init__(self,pf,data_style=7):
+    def __init__(self, pf, data_style='orion_native'):
         self.field_info = OrionFieldContainer()
         self.field_indexes = {}
         self.parameter_file = weakref.proxy(pf)
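
Since the integer data_style values are gone, downstream checks compare against the descriptive strings instead.  A minimal, illustrative sketch (assumes `pf` is a loaded parameter file):

    if pf.h.data_style == 'enzo_packed_3d':      # was: data_style == 6
        pass  # packed-HDF5 handling
    elif pf.h.data_style == 'orion_native':      # was: data_style == 7
        pass  # Orion handling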

Modified: trunk/yt/lagos/OutputTypes.py
==============================================================================
--- trunk/yt/lagos/OutputTypes.py	(original)
+++ trunk/yt/lagos/OutputTypes.py	Fri Jul 17 15:44:26 2009
@@ -358,6 +358,15 @@
         if not self.has_key("TimeUnits"):
             self.conversion_factors["Time"] = self["LengthUnits"] / self["x-velocity"]
 
+    def _setup_nounits_units(self):
+        z = 0
+        mylog.warning("Setting 1.0 in code units to be 1.0 cm")
+        if not self.has_key("TimeUnits"):
+            mylog.warning("No time units.  Setting 1.0 = 1 second.")
+            self.conversion_factors["Time"] = 1.0
+        for unit in mpc_conversion.keys():
+            self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
+
     def cosmology_get_units(self):
         """
         Return an Enzo-fortran style dictionary of units to feed into custom
@@ -396,7 +405,7 @@
 
 class EnzoStaticOutputInMemory(EnzoStaticOutput):
     _hierarchy_class = EnzoHierarchyInMemory
-    _data_style = 8
+    _data_style = 'inline'
 
     def __new__(cls, *args, **kwargs):
         obj = object.__new__(cls)
@@ -451,7 +460,8 @@
     _hierarchy_class = OrionHierarchy
     _fieldinfo_class = OrionFieldContainer
 
-    def __init__(self, plotname, paramFilename=None,fparamFilename=None,data_style=7,paranoia=False):
+    def __init__(self, plotname, paramFilename=None, fparamFilename=None,
+                 data_style='orion_native', paranoia=False):
         """need to override for Orion file structure.
 
         the paramfile is usually called "inputs"
@@ -471,7 +481,8 @@
 
         self.fparameters = {}
 
-        StaticOutput.__init__(self, plotname.rstrip("/"), data_style=7)
+        StaticOutput.__init__(self, plotname.rstrip("/"),
+                              data_style='orion_native')
         self.field_info = self._fieldinfo_class()
 
         # self.directory is the directory ENCLOSING the pltNNNN directory

Modified: trunk/yt/lagos/PointCombine.c
==============================================================================
--- trunk/yt/lagos/PointCombine.c	(original)
+++ trunk/yt/lagos/PointCombine.c	Fri Jul 17 15:44:26 2009
@@ -864,6 +864,423 @@
     return to_return;
 }
 
+static PyObject *Py_FillRegion(PyObject *obj, PyObject *args)
+{
+    PyObject *oc_data, *og_data,
+             *oc_start, *og_start,
+             *oc_dims, *og_dims, *omask;
+    PyObject *tg_data, *tc_data, *dw_data;
+    oc_data = og_data = oc_start = og_start = oc_dims = og_dims = omask = NULL;
+    tg_data = tc_data = dw_data = NULL;
+    PyArrayObject **g_data, **c_data, *mask,
+                  *g_start, *c_start, *c_dims, *g_dims, *dwa;
+    mask = g_start = c_start = c_dims = g_dims = NULL;
+    int refratio, ll, direction, n;
+    npy_int64 gxs, gys, gzs, gxe, gye, gze;
+    npy_int64 cxs, cys, czs, cxe, cye, cze;
+    npy_int64 ixs, iys, izs, ixe, iye, ize;
+    npy_int64 gxi, gyi, gzi, cxi, cyi, czi;
+    npy_int64 cdx, cdy, cdz;
+    npy_int64 dw[3];
+    int i;
+    int ci, cj, ck, ri, rj, rk;
+    int total = 0;
+    void (*to_call)(PyArrayObject* c_data, npy_int64 xc,
+                         npy_int64 yc, npy_int64 zc,
+                    PyArrayObject* g_data, npy_int64 xg,
+                         npy_int64 yg, npy_int64 zg);
+    if (!PyArg_ParseTuple(args, "iOOOOOOOOii",
+            &refratio, &og_start, &oc_start,
+            &oc_data, &og_data,
+            &oc_dims, &og_dims, &omask, &dw_data, &ll, &direction))
+    return PyErr_Format(_dataCubeError,
+            "DataCubeGeneric: Invalid parameters.");
+
+    if (direction == 0) to_call = dcRefine;
+    else if (direction == 1) to_call = dcReplace;
+
+    g_start = (PyArrayObject *) PyArray_FromAny(og_start,
+                PyArray_DescrFromType(NPY_INT64), 1, 1, 0, NULL);
+    if(g_start == NULL){
+      PyErr_Format(_dataCubeError, "FillRegion: g_start invalid.");
+      goto _fail;
+    }
+
+    c_start = (PyArrayObject *) PyArray_FromAny(oc_start,
+                PyArray_DescrFromType(NPY_INT64), 1, 1, 0, NULL);
+    if(c_start == NULL){
+      PyErr_Format(_dataCubeError, "FillRegion: c_start invalid.");
+      goto _fail;
+    }
+
+    g_dims  = (PyArrayObject *) PyArray_FromAny(og_dims,
+                PyArray_DescrFromType(NPY_INT32), 1, 1, 0, NULL);
+    if(g_dims == NULL){
+      PyErr_Format(_dataCubeError, "FillRegion: g_dims invalid.");
+      goto _fail;
+    }
+
+    c_dims  = (PyArrayObject *) PyArray_FromAny(oc_dims,
+                PyArray_DescrFromType(NPY_INT32), 1, 1, 0, NULL);
+    if(c_dims == NULL){
+      PyErr_Format(_dataCubeError, "FillRegion: c_dims invalid.");
+      goto _fail;
+    }
+
+    mask    = (PyArrayObject *) PyArray_FromAny(omask,
+                PyArray_DescrFromType(NPY_INT32), 3, 3, 0, NULL);
+    if(mask == NULL){
+      PyErr_Format(_dataCubeError, "FillRegion: mask invalid.");
+      goto _fail;
+    }
+
+    dwa     = (PyArrayObject *) PyArray_FromAny(dw_data,
+                PyArray_DescrFromType(NPY_INT64), 1, 1, 0, NULL);
+    if(dwa == NULL){
+      PyErr_Format(_dataCubeError, "FillRegion: domain width invalid.");
+      goto _fail;
+    }
+    for (i=0;i<3;i++)dw[i] = *(npy_int64*) PyArray_GETPTR1(dwa, i);
+
+    int n_fields = PyList_Size(oc_data);
+    if(n_fields == 0) {
+      PyErr_Format(_dataCubeError,
+          "CombineGrids: Length zero for c_data is invalid.");
+      goto _fail;
+    }
+    if (!PyList_Check(og_data) || (PyList_Size(og_data) != n_fields)){
+      PyErr_Format(_dataCubeError,
+          "CombineGrids: g_data must be a list of arrays same length as c_data!");
+      goto _fail;
+    }
+
+    c_data = (PyArrayObject**)
+             malloc(sizeof(PyArrayObject*)*n_fields);
+    g_data = (PyArrayObject**)
+             malloc(sizeof(PyArrayObject*)*n_fields);
+    for (n=0;n<n_fields;n++)c_data[n]=g_data[n]=NULL;
+    for (n=0;n<n_fields;n++){
+      tg_data = (PyObject *) PyList_GetItem(og_data, n);
+      /* We set up an array so we only have to do this once 
+         Note that this is an array in the C sense, not the NumPy sense */
+      g_data[n]    = (PyArrayObject *) PyArray_FromAny(tg_data,
+          PyArray_DescrFromType(NPY_FLOAT64), 3, 3,
+          NPY_UPDATEIFCOPY, NULL);
+      if((g_data[n]==NULL) || (g_data[n]->nd != 3)) {
+        PyErr_Format(_dataCubeError,
+            "CombineGrids: Three dimensions required for g_data[%i].",n);
+        goto _fail;
+      }
+      tc_data = (PyObject *) PyList_GetItem(oc_data, n);
+      c_data[n]    = (PyArrayObject *) PyArray_FromAny(tc_data,
+          PyArray_DescrFromType(NPY_FLOAT64), 3, 3,
+          NPY_UPDATEIFCOPY, NULL);
+      if((c_data[n]==NULL) || (c_data[n]->nd != 3)) {
+        PyErr_Format(_dataCubeError,
+            "CombineGrids: Three dimensions required for c_data[%i].",n);
+        goto _fail;
+      }
+    }
+
+    /* g[xyz][se] are the start and end index in integers 
+       of the grid, at its refinement level               */
+    gxs = *(npy_int64 *) PyArray_GETPTR1(og_start, 0);
+    gys = *(npy_int64 *) PyArray_GETPTR1(og_start, 1);
+    gzs = *(npy_int64 *) PyArray_GETPTR1(og_start, 2);
+    gxe = gxs + *(npy_int32 *) PyArray_GETPTR1(og_dims, 0);
+    gye = gys + *(npy_int32 *) PyArray_GETPTR1(og_dims, 1);
+    gze = gzs + *(npy_int32 *) PyArray_GETPTR1(og_dims, 2);
+
+    /* c[xyz][se] are the start and end index in integers
+       of the covering grid, at its refinement level      */
+    cxs = *(npy_int64 *) PyArray_GETPTR1(oc_start, 0);
+    cys = *(npy_int64 *) PyArray_GETPTR1(oc_start, 1);
+    czs = *(npy_int64 *) PyArray_GETPTR1(oc_start, 2);
+
+    /* cd[xyz] are the dimensions of the covering grid */
+    cdx = (*(npy_int32 *) PyArray_GETPTR1(oc_dims, 0));
+    cdy = (*(npy_int32 *) PyArray_GETPTR1(oc_dims, 1));
+    cdz = (*(npy_int32 *) PyArray_GETPTR1(oc_dims, 2));
+    cxe = (cxs + cdx - 1);
+    cye = (cys + cdy - 1);
+    cze = (czs + cdz - 1);
+
+    /* It turns out that C89 doesn't define a mechanism for choosing the sign
+       of the remainder.
+    */
+        //fprintf(stderr, "ci == %d, cxi == %d, dw[0] == %d\n", (int) ci, (int) cxi, (int) dw[0]);
+    for(cxi=cxs;cxi<=cxe;cxi++) {
+        ci = (cxi % dw[0]);
+        ci = (ci < 0) ? ci + dw[0] : ci;
+        if ( ci < gxs*refratio || ci >= gxe*refratio) continue;
+        gxi = floor(ci / refratio) - gxs;
+        for(cyi=cys;cyi<=cye;cyi++) {
+            cj = cyi % dw[1];
+            cj = (cj < 0) ? cj + dw[1] : cj;
+            if ( cj < gys*refratio || cj >= gye*refratio) continue;
+            gyi = floor(cj / refratio) - gys;
+            for(czi=czs;czi<=cze;czi++) {
+                ck = czi % dw[2];
+                ck = (ck < 0) ? ck + dw[2] : ck;
+                if ( ck < gzs*refratio || ck >= gze*refratio) continue;
+                gzi = floor(ck / refratio) - gzs;
+                    if ((ll) || (*(npy_int32*)PyArray_GETPTR3(mask, gxi,gyi,gzi) > 0)) 
+                {
+                for(n=0;n<n_fields;n++){
+                    to_call(c_data[n],
+                        cxi - cxs, cyi - cys, czi - czs,
+                        g_data[n], gxi, gyi, gzi);
+                }
+                total += 1;
+                }
+            }
+        }
+    }
+
+    Py_DECREF(g_start);
+    Py_DECREF(c_start);
+    Py_DECREF(g_dims);
+    Py_DECREF(c_dims);
+    Py_DECREF(mask);
+    for(n=0;n<n_fields;n++) {
+        Py_DECREF(g_data[n]);
+        Py_DECREF(c_data[n]);
+    }
+    free(g_data);
+    free(c_data);
+    PyObject *status = PyInt_FromLong(total);
+    return status;
+
+_fail:
+    Py_XDECREF(g_start);
+    Py_XDECREF(c_start);
+    Py_XDECREF(g_dims);
+    Py_XDECREF(c_dims);
+    Py_XDECREF(mask);
+    for(n=0;n<n_fields;n++) {
+        if(g_data[n]!=NULL){Py_XDECREF(g_data[n]);}
+        if(c_data[n]!=NULL){Py_XDECREF(c_data[n]);}
+    }
+    if(g_data!=NULL)free(g_data);
+    if(c_data!=NULL)free(c_data);
+    return NULL;
+}
+
+static PyObject *Py_FillBuffer(PyObject *obj, PyObject *args)
+{
+    PyObject *oc_data, *og_data,
+             *oc_start, *og_start,
+             *oc_dims, *og_dims, *omask, *odls;
+    PyObject *tg_data, *tc_data, *dw_data;
+    oc_data = og_data = oc_start = og_start = oc_dims = og_dims = omask = NULL;
+    tg_data = tc_data = dw_data = odls = NULL;
+    PyArrayObject **g_data, **c_data, *mask,
+                  *g_start, *c_start, *c_dims, *g_dims, *dwa;
+    mask = g_start = c_start = c_dims = g_dims = NULL;
+    double *dls = NULL;
+    int refratio, ll, direction, n;
+    npy_int64 gxs, gys, gzs, gxe, gye, gze;
+    npy_int64 cxs, cys, czs, cxe, cye, cze;
+    npy_int64 ixs, iys, izs, ixe, iye, ize;
+    npy_int64 gxi, gyi, gzi, cxi, cyi, czi;
+    npy_int64 cdx, cdy, cdz;
+    npy_int64 dw[3];
+    int i, axis;
+    int ci, cj, ck, ri, rj, rk;
+    int total = 0;
+
+    if (!PyArg_ParseTuple(args, "iOOOOOOOOOi",
+            &refratio, &og_start, &oc_start,
+            &oc_data, &og_data,
+            &oc_dims, &og_dims, &omask, &dw_data, &odls, &axis))
+    return PyErr_Format(_dataCubeError,
+            "DataCubeGeneric: Invalid parameters.");
+
+    g_start = (PyArrayObject *) PyArray_FromAny(og_start,
+                PyArray_DescrFromType(NPY_INT64), 1, 1, 0, NULL);
+    if(g_start == NULL){
+      PyErr_Format(_dataCubeError, "FillRegion: g_start invalid.");
+      goto _fail;
+    }
+
+    c_start = (PyArrayObject *) PyArray_FromAny(oc_start,
+                PyArray_DescrFromType(NPY_INT64), 1, 1, 0, NULL);
+    if(c_start == NULL){
+      PyErr_Format(_dataCubeError, "FillRegion: c_start invalid.");
+      goto _fail;
+    }
+
+    g_dims  = (PyArrayObject *) PyArray_FromAny(og_dims,
+                PyArray_DescrFromType(NPY_INT32), 1, 1, 0, NULL);
+    if(g_dims == NULL){
+      PyErr_Format(_dataCubeError, "FillRegion: g_dims invalid.");
+      goto _fail;
+    }
+
+    c_dims  = (PyArrayObject *) PyArray_FromAny(oc_dims,
+                PyArray_DescrFromType(NPY_INT32), 1, 1, 0, NULL);
+    if(c_dims == NULL){
+      PyErr_Format(_dataCubeError, "FillRegion: c_dims invalid.");
+      goto _fail;
+    }
+
+    mask    = (PyArrayObject *) PyArray_FromAny(omask,
+                PyArray_DescrFromType(NPY_INT32), 3, 3, 0, NULL);
+    if(mask == NULL){
+      PyErr_Format(_dataCubeError, "FillRegion: mask invalid.");
+      goto _fail;
+    }
+
+    dwa     = (PyArrayObject *) PyArray_FromAny(dw_data,
+                PyArray_DescrFromType(NPY_INT64), 1, 1, 0, NULL);
+    if(dwa == NULL){
+      PyErr_Format(_dataCubeError, "FillRegion: domain width invalid.");
+      goto _fail;
+    }
+    for (i=0;i<3;i++)dw[i] = *(npy_int64*) PyArray_GETPTR1(dwa, i);
+
+    int n_fields = PyList_Size(oc_data);
+    if(n_fields == 0) {
+      PyErr_Format(_dataCubeError,
+          "CombineGrids: Length zero for c_data is invalid.");
+      goto _fail;
+    }
+    if (!PyList_Check(og_data) || (PyList_Size(og_data) != n_fields)){
+      PyErr_Format(_dataCubeError,
+          "CombineGrids: g_data must be a list of arrays same length as c_data!");
+      goto _fail;
+    }
+    if (!PyList_Check(odls) || (PyList_Size(odls) != n_fields)){
+      PyErr_Format(_dataCubeError,
+          "CombineGrids: dls must be a list of arrays same length as c_data!");
+      goto _fail;
+    }
+
+    c_data = (PyArrayObject**)
+             malloc(sizeof(PyArrayObject*)*n_fields);
+    g_data = (PyArrayObject**)
+             malloc(sizeof(PyArrayObject*)*n_fields);
+    dls = (double *) malloc(sizeof(double) * n_fields);
+    PyObject *temp = NULL;
+    for (n=0;n<n_fields;n++)c_data[n]=g_data[n]=NULL;
+    for (n=0;n<n_fields;n++){
+      /* Borrowed reference ... */
+      temp = PyList_GetItem(odls, n);
+      dls[n] = PyFloat_AsDouble(temp);
+
+      tg_data = (PyObject *) PyList_GetItem(og_data, n);
+      /* We set up an array so we only have to do this once 
+         Note that this is an array in the C sense, not the NumPy sense */
+      g_data[n]    = (PyArrayObject *) PyArray_FromAny(tg_data,
+          PyArray_DescrFromType(NPY_FLOAT64), 3, 3,
+          NPY_UPDATEIFCOPY, NULL);
+      if((g_data[n]==NULL) || (g_data[n]->nd != 3)) {
+        PyErr_Format(_dataCubeError,
+            "CombineGrids: Three dimensions required for g_data[%i].",n);
+        goto _fail;
+      }
+      tc_data = (PyObject *) PyList_GetItem(oc_data, n);
+      c_data[n]    = (PyArrayObject *) PyArray_FromAny(tc_data,
+          PyArray_DescrFromType(NPY_FLOAT64), 2, 2,
+          NPY_UPDATEIFCOPY, NULL);
+      if((c_data[n]==NULL) || (c_data[n]->nd != 2)) {
+        PyErr_Format(_dataCubeError,
+            "CombineGrids: Two dimensions required for c_data[%i].",n);
+        goto _fail;
+      }
+    }
+
+    /* g[xyz][se] are the start and end index in integers 
+       of the grid, at its refinement level               */
+    gxs = *(npy_int64 *) PyArray_GETPTR1(og_start, 0);
+    gys = *(npy_int64 *) PyArray_GETPTR1(og_start, 1);
+    gzs = *(npy_int64 *) PyArray_GETPTR1(og_start, 2);
+    gxe = gxs + *(npy_int32 *) PyArray_GETPTR1(og_dims, 0);
+    gye = gys + *(npy_int32 *) PyArray_GETPTR1(og_dims, 1);
+    gze = gzs + *(npy_int32 *) PyArray_GETPTR1(og_dims, 2);
+
+    /* c[xyz][se] are the start and end index in integers
+       of the covering grid, at its refinement level      */
+    cxs = *(npy_int64 *) PyArray_GETPTR1(oc_start, 0);
+    cys = *(npy_int64 *) PyArray_GETPTR1(oc_start, 1);
+    czs = *(npy_int64 *) PyArray_GETPTR1(oc_start, 2);
+
+    /* cd[xyz] are the dimensions of the covering grid */
+    cdx = (*(npy_int32 *) PyArray_GETPTR1(oc_dims, 0));
+    cdy = (*(npy_int32 *) PyArray_GETPTR1(oc_dims, 1));
+    cdz = (*(npy_int32 *) PyArray_GETPTR1(oc_dims, 2));
+    cxe = (cxs + cdx - 1);
+    cye = (cys + cdy - 1);
+    cze = (czs + cdz - 1);
+
+    /* It turns out that C89 doesn't define a mechanism for choosing the sign
+       of the remainder.
+    */
+    int x_loc, y_loc; // For access into the buffer
+    for(cxi=cxs;cxi<=cxe;cxi++) {
+        ci = (cxi % dw[0]);
+        ci = (ci < 0) ? ci + dw[0] : ci;
+        if ( ci < gxs*refratio || ci >= gxe*refratio) continue;
+        gxi = floor(ci / refratio) - gxs;
+        for(cyi=cys;cyi<=cye;cyi++) {
+            cj = cyi % dw[1];
+            cj = (cj < 0) ? cj + dw[1] : cj;
+            if ( cj < gys*refratio || cj >= gye*refratio) continue;
+            gyi = floor(cj / refratio) - gys;
+            for(czi=czs;czi<=cze;czi++) {
+                ck = czi % dw[2];
+                ck = (ck < 0) ? ck + dw[2] : ck;
+                if ( ck < gzs*refratio || ck >= gze*refratio) continue;
+                gzi = floor(ck / refratio) - gzs;
+                    if (*(npy_int32*)PyArray_GETPTR3(mask, gxi,gyi,gzi) > 0)
+                {
+                switch (axis) {
+                  case 0: x_loc = cyi-cys; y_loc = czi-czs; break;
+                  case 1: x_loc = cxi-cxs; y_loc = czi-czs; break;
+                  case 2: x_loc = cxi-cys; y_loc = cyi-cys; break;
+                }
+                for(n=0;n<n_fields;n++){
+                    *(npy_float64*) PyArray_GETPTR2(c_data[n], x_loc, y_loc)
+                    +=  *(npy_float64*) PyArray_GETPTR3(g_data[n], gxi, gyi, gzi) 
+                        * dls[n] / refratio;
+                }
+                total += 1;
+                }
+            }
+        }
+    }
+
+    Py_DECREF(g_start);
+    Py_DECREF(c_start);
+    Py_DECREF(g_dims);
+    Py_DECREF(c_dims);
+    Py_DECREF(mask);
+    for(n=0;n<n_fields;n++) {
+        Py_DECREF(g_data[n]);
+        Py_DECREF(c_data[n]);
+    }
+    if(dls!=NULL)free(dls);
+    if(g_data!=NULL)free(g_data);
+    if(c_data!=NULL)free(c_data);
+    PyObject *status = PyInt_FromLong(total);
+    return status;
+
+_fail:
+    Py_XDECREF(g_start);
+    Py_XDECREF(c_start);
+    Py_XDECREF(g_dims);
+    Py_XDECREF(c_dims);
+    Py_XDECREF(mask);
+    if(dls!=NULL)free(dls);
+    for(n=0;n<n_fields;n++) {
+        if(g_data!=NULL)if(g_data[n]!=NULL){Py_XDECREF(g_data[n]);}
+        if(c_data!=NULL)if(c_data[n]!=NULL){Py_XDECREF(c_data[n]);}
+    }
+    if(g_data!=NULL)free(g_data);
+    if(c_data!=NULL)free(c_data);
+    return NULL;
+}
+
 static PyObject *_findContoursError;
 
 npy_int64 process_neighbors(PyArrayObject*, npy_int64, npy_int64, npy_int64);
@@ -1253,6 +1670,8 @@
     {"FindContours", Py_FindContours, METH_VARARGS},
     {"FindBindingEnergy", Py_FindBindingEnergy, METH_VARARGS},
     {"OutputFloatsToFile", Py_OutputFloatsToFile, METH_VARARGS},
+    {"FillRegion", Py_FillRegion, METH_VARARGS},
+    {"FillBuffer", Py_FillBuffer, METH_VARARGS},
     {NULL, NULL} /* Sentinel */
 };
 

Modified: trunk/yt/lagos/RTIntegrator.pyx
==============================================================================
--- trunk/yt/lagos/RTIntegrator.pyx	(original)
+++ trunk/yt/lagos/RTIntegrator.pyx	Fri Jul 17 15:44:26 2009
@@ -74,6 +74,7 @@
 @cython.boundscheck(False)
 def VoxelTraversal(np.ndarray[np.int_t, ndim=3] grid_mask,
                    np.ndarray[np.float64_t, ndim=3] grid_t,
+                   np.ndarray[np.float64_t, ndim=3] grid_dt,
                    np.ndarray[np.float64_t, ndim=1] left_edge,
                    np.ndarray[np.float64_t, ndim=1] right_edge,
                    np.ndarray[np.float64_t, ndim=1] dx,
@@ -138,28 +139,32 @@
         # If we've reached t = 1, we are done.
         grid_mask[cur_ind[0], cur_ind[1], cur_ind[2]] = 1
         if (tmax[0] > 1.0) and (tmax[1] > 1.0) and (tmax[2] > 1.0):
-            grid_t[cur_ind[0], cur_ind[1], cur_ind[2]] = 1.0 - enter_t
+            grid_dt[cur_ind[0], cur_ind[1], cur_ind[2]] = 1.0 - enter_t
             break
         ncells += 1
         if tmax[0] < tmax[1]:
             if tmax[0] < tmax[2]:
-                grid_t[cur_ind[0], cur_ind[1], cur_ind[2]] = tmax[0] - enter_t
+                grid_t[cur_ind[0], cur_ind[1], cur_ind[2]] = enter_t
+                grid_dt[cur_ind[0], cur_ind[1], cur_ind[2]] = tmax[0] - enter_t
                 enter_t = tmax[0]
                 tmax[0] += tdelta[0]
                 cur_ind[0] += step[0]
             else:
-                grid_t[cur_ind[0], cur_ind[1], cur_ind[2]] = tmax[2] - enter_t
+                grid_t[cur_ind[0], cur_ind[1], cur_ind[2]] = enter_t
+                grid_dt[cur_ind[0], cur_ind[1], cur_ind[2]] = tmax[2] - enter_t
                 enter_t = tmax[2]
                 tmax[2] += tdelta[2]
                 cur_ind[2] += step[2]
         else:
             if tmax[1] < tmax[2]:
-                grid_t[cur_ind[0], cur_ind[1], cur_ind[2]] = tmax[1] - enter_t
+                grid_t[cur_ind[0], cur_ind[1], cur_ind[2]] = enter_t
+                grid_dt[cur_ind[0], cur_ind[1], cur_ind[2]] = tmax[1] - enter_t
                 enter_t = tmax[1]
                 tmax[1] += tdelta[1]
                 cur_ind[1] += step[1]
             else:
-                grid_t[cur_ind[0], cur_ind[1], cur_ind[2]] = tmax[2] - enter_t
+                grid_t[cur_ind[0], cur_ind[1], cur_ind[2]] = enter_t
+                grid_dt[cur_ind[0], cur_ind[1], cur_ind[2]] = tmax[2] - enter_t
                 enter_t = tmax[2]
                 tmax[2] += tdelta[2]
                 cur_ind[2] += step[2]
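
With VoxelTraversal now filling both arrays, a non-orthogonal ray exposes 'dts' (the parametric path length through each cell) and 't' (the parametric position where the ray enters that cell).  A minimal usage sketch, illustrative only; it assumes `pf` is a loaded parameter file and `na` is numpy, following the yt convention:

    ray = pf.h.ray([0.1, 0.1, 0.1], [0.9, 0.9, 0.9])
    order = na.argsort(ray['t'])                     # order cells along the ray
    weighted = (ray['Density'][order] * ray['dts'][order]).sum()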

Modified: trunk/yt/lagos/__init__.py
==============================================================================
--- trunk/yt/lagos/__init__.py	(original)
+++ trunk/yt/lagos/__init__.py	Fri Jul 17 15:44:26 2009
@@ -38,6 +38,8 @@
 import warnings
 try:
     import h5py
+    if not hasattr(h5py.h5, "ArgsError"):
+        h5py.h5.ArgsError = h5py.h5.H5Error
 except ImportError:
     ytcfg["lagos", "serialization"] = "False"
     mylog.warning("No h5py. Data serialization disabled.")

Modified: trunk/yt/lagos/hop/hop_numpy.h
==============================================================================
--- trunk/yt/lagos/hop/hop_numpy.h	(original)
+++ trunk/yt/lagos/hop/hop_numpy.h	Fri Jul 17 15:44:26 2009
@@ -3,9 +3,9 @@
 #include "numpy/ndarrayobject.h"
 
 #define NP_DENS(kd, in) \
-    *(npy_float64*)PyArray_GETPTR1(kd->np_densities, kd->p[in].np_index)
+    (*(npy_float64*)PyArray_GETPTR1(kd->np_densities, kd->p[in].np_index))
 #define NP_POS(kd, in, dim) \
-    *(npy_float64*)PyArray_GETPTR1(kd->np_pos[dim], kd->p[in].np_index)
+    (*(npy_float64*)PyArray_GETPTR1(kd->np_pos[dim], kd->p[in].np_index))
 #define NP_MASS(kd, in) \
     (*(npy_float64*)PyArray_GETPTR1(kd->np_masses, kd->p[in].np_index))/kd->totalmass
 

Modified: trunk/yt/lagos/setup.py
==============================================================================
--- trunk/yt/lagos/setup.py	(original)
+++ trunk/yt/lagos/setup.py	Fri Jul 17 15:44:26 2009
@@ -20,7 +20,7 @@
     config.make_config_py() # installs __config__.py
     config.make_svn_version_py()
     config.add_extension("PointCombine", "yt/lagos/PointCombine.c", libraries=["m"])
-    #config.add_extension("RTIntegrator", "yt/lagos/RTIntegrator.c")
+    config.add_extension("RTIntegrator", "yt/lagos/RTIntegrator.c")
     config.add_extension("Interpolators", "yt/lagos/Interpolators.c")
     #config.add_extension("DepthFirstOctree", "yt/lagos/DepthFirstOctree.c")
     config.add_subpackage("hop")


