[Yt-svn] yt-commit r384 - in trunk: tests yt/lagos

mturk at wrangler.dreamhost.com
Mon Mar 3 13:06:41 PST 2008


Author: mturk
Date: Mon Mar  3 13:06:39 2008
New Revision: 384
URL: http://yt.spacepope.org/changeset/384

Log:
Okay, this one might break things, but I sure hope it doesn't.

 * I have rewritten the HDF5 IO to be lighter-weight than before, and less
   dependent on PyTables and its attendant overhead.
 * Projections now use a single entry point into the PointCombine C module
   (CombineGrids).  Additionally, they have been slightly rewritten to avoid
   modifying the grid objects, and to store all values locally instead.
 * Projections can now project multiple fields at once; see the usage sketch
   after this list.
 * Speed improvements in projections.
 * The field "Ones" no longer requires a read of "Density"; it is now built
   directly from the grid dimensions.  This is a model I should apply
   elsewhere.
 * The tests include a couple of modifications, as well as checks for
   projection correctness when projecting multiple fields.
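
As a rough sketch of the multi-field interface, modeled on the new tests (the
parameter-file class and dataset name here are assumptions, not part of this
changeset):

    from yt.lagos import EnzoStaticOutput

    pf = EnzoStaticOutput("RedshiftOutput0001")  # placeholder dataset
    # Unweighted projection of two fields along the x-axis, in a single pass
    p = pf.hierarchy.proj(0, ["Density", "Ones"])
    # Weighted projection of several fields, using "Ones" as the weight field
    p2 = pf.hierarchy.proj(0, ["Density", "Temperature", "Ones"],
                           weight_field="Ones")
    print p["Density"].sum(), p2["Temperature"].max()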

The things to watch out for, in terms of breakage, are all related to compiling the new C extension.  I think.  I have tested this quite a bit with both the unit tests and real-world data.
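
If the compilation does break, the place to look is how the new extension gets
declared in trunk/yt/lagos/setup.py.  That change is not reproduced in this
diff, so the following is only a hypothetical sketch of a numpy.distutils
declaration; the HDF5 include and library paths are assumptions about a local
install, not what the trunk hard-codes:

    # Hypothetical sketch only; see trunk/yt/lagos/setup.py for the real change.
    from numpy.distutils.misc_util import Configuration

    def configuration(parent_package='', top_path=None):
        config = Configuration('lagos', parent_package, top_path)
        # H5LT lives in the HDF5 high-level library, hence hdf5_hl below.
        config.add_extension("HDF5LightReader",
                             sources=["HDF5LightReader.c"],
                             include_dirs=["/usr/include"],
                             libraries=["hdf5_hl", "hdf5"],
                             library_dirs=["/usr/lib"])
        return config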

Also, right now it is only the full-grid reading that relies on my new HDF5 wrapper.  I will be working on making slices work with it as well; that has the potential to speed up slicing on HDF5 *considerably*.
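
Reading a full grid through the new wrapper amounts to the single call used in
readDataHDF5 below; as a standalone sketch (the file name is a placeholder, and
the import assumes the extension builds into yt.lagos):

    from yt.lagos import HDF5LightReader

    # ReadData(filename, nodename) returns a float64 array; swapaxes(0, 2)
    # matches the axis ordering yt expects from Enzo's HDF5 grid files.
    data = HDF5LightReader.ReadData("data0001.grid0001", "/Density").swapaxes(0, 2)
    print data.shape, data.min(), data.max()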



Added:
   trunk/yt/lagos/HDF5LightReader.c
Modified:
   trunk/tests/test_lagos.py
   trunk/yt/lagos/BaseDataTypes.py
   trunk/yt/lagos/BaseGridType.py
   trunk/yt/lagos/DataReadingFuncs.py
   trunk/yt/lagos/DerivedFields.py
   trunk/yt/lagos/PointCombine.c
   trunk/yt/lagos/__init__.py
   trunk/yt/lagos/setup.py

Modified: trunk/tests/test_lagos.py
==============================================================================
--- trunk/tests/test_lagos.py	(original)
+++ trunk/tests/test_lagos.py	Mon Mar  3 13:06:39 2008
@@ -35,7 +35,11 @@
             os.unlink(i)
 
     def tearDown(self):
+        if hasattr(self,'data'): del self.data
+        if hasattr(self,'region'): del self.region
+        if hasattr(self,'ind_to_get'): del self.ind_to_get
         del self.OutputFile, self.hierarchy
+        
 
 class TestHierarchy(LagosTestingBase, unittest.TestCase):
     def testGetHierarchy(self):
@@ -85,6 +89,14 @@
         mr = r["CellMass"].sum() # Testing adding new field transparently
         self.assertEqual(ms,mr)  # Asserting equality between the two
 
+    def testProjectionCorrectnessMultipleFields(self):
+        p = self.hierarchy.proj(0,["Density","Ones"]) # Unweighted
+        self.assertTrue(na.all(p["Ones"] == 1.0))
+
+    def testProjectionMakingMultipleFields(self):
+        p = self.hierarchy.proj(0,["Density","Temperature","Ones"],weight_field="Ones") # Unweighted
+        self.assertEqual(len(p.data.keys()), 7)
+
     def testProjectionMaking(self):
         p = self.hierarchy.proj(0,"Density") # Unweighted
         p = self.hierarchy.proj(1,"Temperature","Density") # Weighted
@@ -94,7 +106,7 @@
         # Now we test that we get good answers
         for axis in range(3):
             p = self.hierarchy.proj(axis, "Ones") # Derived field
-            self.assertAlmostEqual(p["Ones"].prod(), 1.0, 7)
+            self.assertTrue(na.all(p["Ones"] == 1.0))
 
 # Now we test each datatype in turn
 

Modified: trunk/yt/lagos/BaseDataTypes.py
==============================================================================
--- trunk/yt/lagos/BaseDataTypes.py	(original)
+++ trunk/yt/lagos/BaseDataTypes.py	Mon Mar  3 13:06:39 2008
@@ -260,7 +260,7 @@
         # We take a 3-tuple of the coordinate we want to slice through, as well
         # as the axis we're slicing along
         self._get_list_of_grids()
-        if not self.has_key('dx'):
+        if not self.has_key('pdx'):
             self._generate_coords()
         if fields == None:
             fields_to_get = self.fields
@@ -458,14 +458,6 @@
         dataVals = dv.ravel()[cm]
         return dataVals
 
-    def _generate_field_in_grids(self, field, num_ghost_zones=0):
-        for grid in self._grids:
-            self.__touch_grid_field(grid, field)
-
-    @restore_grid_state
-    def __touch_grid_field(self, grid, field):
-        grid[field]
-
 class EnzoCuttingPlaneBase(Enzo2DData):
     """
     EnzoCuttingPlane is an oblique plane through the data,
@@ -610,8 +602,13 @@
         else:
             self.type="SUM"
             self.func = na.sum
+        self.__retval_coords = {}
+        self.__retval_fields = {}
+        self.__retval_coarse = {}
+        self.__overlap_masks = {}
+        self._temp = {}
         if not self._deserialize():
-            self.__calculate_memory()
+            self.__calculate_overlap()
             if self.hierarchy.data_style == 6 and False:
                 self.__cache_data()
             self._refresh_data()
@@ -633,36 +630,22 @@
         self.hierarchy.grid.readDataFast = readDataPackedHandle
 
     @time_execution
-    def __calculate_memory(self):
-        level_mem = {}
-        h = self.hierarchy
+    def __calculate_overlap(self):
         s = self.source
+        mylog.info("Generating overlap masks")
         i = 0
-        mylog.info("Calculating memory usage")
         for level in range(self._max_level+1):
-            level_mem[level] = 0
             mylog.debug("Examining level %s", level)
             grids = s.levelIndices[level]
-            num_grids = len(grids)
             RE = s.gridRightEdge[grids]
             LE = s.gridLeftEdge[grids]
             for grid in s._grids[grids]:
                 if (i%1e3) == 0:
                     mylog.debug("Reading and masking %s / %s", i, len(s._grids))
-                grid._generate_overlap_masks(self.axis, LE, RE)
-                grid._overlap_grids[self.axis] = \
-                  s._grids[grids][na.where(grid.overlap_masks[self.axis])]
-                level_mem[level] += \
-                          grid.ActiveDimensions.prod() / \
-                          grid.ActiveDimensions[self.axis]
+                self.__overlap_masks[grid.id] = \
+                    grid._generate_overlap_masks(self.axis, LE, RE)
                 i += 1
-        for level in range(self._max_level+1):
-            gI = s.levelIndices[level]
-            mylog.debug("%s cells and %s grids for level %s", \
-                level_mem[level], len(gI), level)
-        mylog.debug("We need %s cells total",
-                    na.add.reduce(level_mem.values()))
-        self.__memory_per_level = level_mem
+        mylog.info("Finished calculating overlap.")
 
     def _serialize(self):
         mylog.info("Serializing data...")
@@ -687,171 +670,158 @@
         self[self.fields[0]] = array[4,:]
         return True
 
-    def __project_level(self, level, field):
+    def __project_level(self, level, fields):
         grids_to_project = self.source.select_grids(level)
         zero_out = (level != self._max_level)
         pbar = get_pbar('Projecting  level % 2i / % 2i ' \
                           % (level, self._max_level), len(grids_to_project))
         for pi, grid in enumerate(grids_to_project):
-            grid.retVal = self._project_grid(grid, field, zero_out)
+            g_coords, g_fields = self._project_grid(grid, fields, zero_out)
+            for fi, field in enumerate(fields):
+                dl = 1.0
+                if field in fieldInfo and fieldInfo[field].line_integral:
+                    dl = just_one(grid['d%s' % axis_names[self.axis]])
+                g_fields[fi] *= dl
+            self.__retval_coords[grid.id] = g_coords
+            self.__retval_fields[grid.id] = g_fields
             pbar.update(pi)
         pbar.finish()
         self.__combine_grids_on_level(level) # In-place
         if level > 0 and level <= self._max_level:
             self.__refine_to_level(level) # In-place
-        all_data = [ [grid.retVal[j] for grid in grids_to_project] for j in range(5)]
+        coord_data = []
+        field_data = []
         for grid in grids_to_project:
-            cI = na.where(grid.retVal[3]==0) # Where childmask = 0
-            grid.coarseData = [grid.retVal[j][cI] for j in range(5)]
-        levelData = [na.concatenate(all_data[i]) for i in range(5)]
-        dblI = na.where((levelData[0]>-1) & (levelData[3]==1))
+            coarse = self.__retval_coords[grid.id][2]==0 # Where childmask = 0
+            fine = ~coarse
+            coord_data.append([pi[fine] for pi in self.__retval_coords[grid.id]])
+            field_data.append([pi[fine] for pi in self.__retval_fields[grid.id]])
+            self.__retval_coords[grid.id] = [pi[coarse] for pi in self.__retval_coords[grid.id]]
+            self.__retval_fields[grid.id] = [pi[coarse] for pi in self.__retval_fields[grid.id]]
+        coord_data = na.concatenate(coord_data, axis=1)
+        field_data = na.concatenate(field_data, axis=1)
         if self._weight != None:
-            weightedData = levelData[2][dblI] / levelData[4][dblI]
-        else:
-            weightedData = levelData[2][dblI]
-        mylog.debug("Level %s done: %s final of %s", \
-                   level, len(dblI[0]), \
-                   levelData[0].shape[0])
-        dx = grids_to_project[0].dx * na.ones(len(dblI[0]), dtype='float64')
-        return [levelData[0][dblI], levelData[1][dblI], weightedData, dx]
+            field_data = field_data / coord_data[3,:].reshape((1,coord_data.shape[1]))
+        mylog.info("Level %s done: %s final", \
+                   level, coord_data.shape[1])
+        dx = grids_to_project[0].dx# * na.ones(coord_data.shape[0], dtype='float64')
+        return coord_data, dx, field_data
 
     def __cleanup_level(self, level):
+        pass
         grids_to_project = self.source.select_grids(level)
-        all_data = []
+        coord_data = []
+        field_data = []
         for grid in grids_to_project:
-            if hasattr(grid,'coarseData'):
-                if self._weight is not None:
-                    weightedData = grid.coarseData[2] / grid.coarseData[4]
-                else:
-                    weightedData = grid.coarseData[2]
-                all_data.append([grid.coarseData[0], grid.coarseData[1],
-                    weightedData,
-                    na.ones(grid.coarseData[0].shape,dtype='float64')*grid.dx])
+            if self.__retval_coords[grid.id][0].size == 0: continue
+            if self._weight is not None:
+                weightedData = grid.coarseData[2] / grid.coarseData[4]
+            else:
+                weightedData = grid.coarseData[2]
+            all_data.append([grid.coarseData[0], grid.coarseData[1],
+                weightedData,
+                na.ones(grid.coarseData[0].shape,dtype='float64')*grid.dx])
         return na.concatenate(all_data, axis=1)
 
-    def __setup_weave_dict_combine(self, grid1, grid2):
-        # Recall: x, y, val, mask, weight
-        my_dict = {}
-        my_dict['g1data_x'] = grid1.retVal[0]
-        my_dict['g1data_y'] = grid1.retVal[1]
-        my_dict['g1data_vals'] = grid1.retVal[2]
-        my_dict['g1data_mask'] = grid1.retVal[3]
-        my_dict['g1data_wgt'] = grid1.retVal[4]
-        my_dict['g1points'] = grid1.retVal[0].size
-
-        my_dict['g2data_x'] = grid2.retVal[0]
-        my_dict['g2data_y'] = grid2.retVal[1]
-        my_dict['g2data_vals'] = grid2.retVal[2]
-        my_dict['g2data_mask'] = grid2.retVal[3]
-        my_dict['g2data_wgt'] = grid2.retVal[4]
-        my_dict['g2points'] = grid2.retVal[0].size
-        return my_dict
-
     def __combine_grids_on_level(self, level):
         grids = self.source.select_grids(level)
+        grids_i = self.source.levelIndices[level]
         pbar = get_pbar('Combining   level % 2i / % 2i ' \
                           % (level, self._max_level), len(grids))
         # We have an N^2 check, so we try to be as quick as possible
         # and to skip as many as possible
         for pi, grid1 in enumerate(grids):
             pbar.update(pi)
-            if grid1.retVal[0].shape[0] == 0: continue
-            for grid2 in grid1._overlap_grids[self.axis].tolist():
-                if grid2.retVal[0].shape[0] == 0 \
+            if self.__retval_coords[grid1.id][0].shape[0] == 0: continue
+            for grid2 in self.source._grids[grids_i][self.__overlap_masks[grid1.id]]:
+                if self.__retval_coords[grid2.id][0].shape[0] == 0 \
                   or grid1.id == grid2.id:
                     continue
-                my_dict = self.__setup_weave_dict_combine(grid1, grid2)
-                weave.inline(ProjectionCombineSameLevel,
-                             my_dict.keys(), local_dict=my_dict,
-                             compiler='gcc',
-                             type_converters=converters.blitz,
-                             auto_downcast=0, verbose=2)
-                goodI = na.where(grid2.retVal[0] > -1)
-                grid2.retVal = [grid2.retVal[i][goodI] for i in range(5)]
+                args = [] # First is source, then destination
+                args += self.__retval_coords[grid2.id] + [self.__retval_fields[grid2.id]]
+                args += self.__retval_coords[grid1.id] + [self.__retval_fields[grid1.id]]
+                args.append(1) # Refinement factor
+                kk = PointCombine.CombineGrids(*args)
+                goodI = na.where(self.__retval_coords[grid2.id][0] > -1)
+                self.__retval_coords[grid2.id] = \
+                    [coords[goodI] for coords in self.__retval_coords[grid2.id]]
+                self.__retval_fields[grid2.id] = \
+                    [fields[goodI] for fields in self.__retval_fields[grid2.id]]
         pbar.finish()
 
-    def __setup_weave_dict_refine(self, grid1, grid2):
-        # Recall: x, y, val, mask, weight
-        my_dict = {}
-        my_dict['finedata_x'] = grid1.retVal[0]
-        my_dict['finedata_y'] = grid1.retVal[1]
-        my_dict['finedata_vals'] = grid1.retVal[2]
-        my_dict['finedata_wgt'] = grid1.retVal[4]
-        my_dict['fpoints'] = grid1.retVal[0].size
-        my_dict['coarsedata_x'] = grid2.coarseData[0]
-        my_dict['coarsedata_y'] = grid2.coarseData[1]
-        my_dict['coarsedata_vals'] = grid2.coarseData[2]
-        my_dict['coarsedata_wgt'] = grid2.coarseData[4]
-        my_dict['cpoints'] = grid2.coarseData[0].size
-        my_dict['flagged'] = na.zeros(grid2.coarseData[0].size)
-        my_dict['rf'] = int(grid2.dx/grid1.dx)
-        return my_dict
-
     def __refine_to_level(self, level):
         grids = self.source.select_grids(level)
+        grids_up = self.source.levelIndices[level-1]
         pbar = get_pbar('Refining to level % 2i / % 2i ' \
                           % (level, self._max_level), len(grids))
         for pi, grid1 in enumerate(grids):
             pbar.update(pi)
-            o_grids = grid1.Parent._overlap_grids[self.axis].tolist()
-            for grid2 in o_grids:
-                if grid2.coarseData[0].shape[0] == 0: continue # Already refined
-                my_dict = self.__setup_weave_dict_refine(grid1, grid2)
-                weave.inline(ProjectionRefineCoarseData,
-                             my_dict.keys(), local_dict=my_dict,
-                             compiler='gcc',
-                             type_converters=converters.blitz,
-                             auto_downcast=0, verbose=2)
-                goodI = na.where(my_dict['flagged'] == 0)
-                grid2.coarseData = [grid2.coarseData[i][goodI] for i in range(5)]
+            for grid2 in self.source._grids[grids_up][self.__overlap_masks[grid1.Parent.id]]:
+                if self.__retval_coords[grid2.id][0].shape[0] == 0: continue
+                args = []
+                args += self.__retval_coords[grid2.id] + [self.__retval_fields[grid2.id]]
+                args += self.__retval_coords[grid1.id] + [self.__retval_fields[grid1.id]]
+                args.append(int(grid2.dx / grid1.dx))
+                kk = PointCombine.CombineGrids(*args)
+                goodI = (self.__retval_coords[grid2.id][0] > -1)
+                self.__retval_coords[grid2.id] = \
+                    [coords[goodI] for coords in self.__retval_coords[grid2.id]]
+                self.__retval_fields[grid2.id] = \
+                    [fields[goodI] for fields in self.__retval_fields[grid2.id]]
         for grid1 in self.source.select_grids(level-1):
-            if not self._check_region and grid1.coarseData[0].shape[0] != 0:
+            if not self._check_region and self.__retval_coords[grid1.id][0].size != 0:
                 mylog.error("Something messed up, and %s still has %s points of data",
-                            grid1, grid1.coarseData[0].size)
-                print grid1.coarseData[0]
-                raise ValueError(grid1)
+                            grid1, self.__retval_coords[grid1.id][0].size)
+                raise ValueError(grid1, self.__retval_coords[grid1.id])
         pbar.finish()
 
     @time_execution
-    def get_data(self, field = None):
-        if not field: field = self.fields[0]
-        all_data = []
-        s = self.source
+    def get_data(self, fields = None):
+        if fields is None: fields = ensure_list(self.fields)
+        coord_data = []
+        field_data = []
+        dxs = []
         for level in range(0, self._max_level+1):
-            all_data.append(self.__project_level(level, field))
-        if self._check_region:
-            for level in range(0, self._max_level+1):
-                check=self.__cleanup_level(level)
+            my_coords, my_dx, my_fields = self.__project_level(level, fields)
+            coord_data.append(my_coords)
+            field_data.append(my_fields)
+            dxs.append(my_dx * na.ones(my_coords.shape[1], dtype='float64'))
+            if self._check_region and False:
+                check=self.__cleanup_level(level - 1)
                 if len(check) > 0: all_data.append(check)
-        all_data = na.concatenate(all_data, axis=1)
+            # Now, we should clean up after ourselves...
+            for grid in self.source.select_grids(level - 1):
+                grid.clear_data()
+                del self.__retval_coords[grid.id]
+                del self.__retval_fields[grid.id]
+                del self.__overlap_masks[grid.id]
+        coord_data = na.concatenate(coord_data, axis=1)
+        field_data = na.concatenate(field_data, axis=1)
+        dxs = na.concatenate(dxs, axis=1)
         # We now convert to half-widths and center-points
-        self['pdx'] = all_data[3,:]
-        self['px'] = (all_data[0,:]+0.5) * self['pdx']
-        self['py'] = (all_data[1,:]+0.5) * self['pdx']
-        self['pdx'] *= 0.5
-        self['pdy'] = self['pdx'].copy()
-        self[field] = all_data[2,:]
-        # Now, we should clean up after ourselves...
-        [grid.clear_data() for grid in s._grids]
-
-    def _project_grid(self, grid, field, zero_out):
-        if self._weight == None:
-            masked_data = self._get_data_from_grid(grid, field)
-            weight_data = na.ones(masked_data.shape)
+        self.data['pdx'] = dxs
+        self.data['px'] = (coord_data[0,:]+0.5) * self['pdx']
+        self.data['py'] = (coord_data[1,:]+0.5) * self['pdx']
+        self.data['pdx'] *= 0.5
+        self.data['pdy'] = self.data['pdx'].copy()
+        for fi, field in enumerate(fields):
+            self[field] = field_data[fi,:]
+
+    def add_fields(self, fields, weight = "CellMassMsun"):
+        pass
+
+    def _project_grid(self, grid, fields, zero_out):
+        if self._weight is None:
+            weight_data = na.ones(grid.ActiveDimensions)
         else:
             weight_data = self._get_data_from_grid(grid, self._weight)
-            masked_data = self._get_data_from_grid(grid, field) * weight_data
-        if zero_out:   # Not sure how to do this here
-            masked_data[grid.child_indices] = 0
-            weight_data[grid.child_indices] = 0
-        #@todo: Fix the func to set up a path length too
+        if zero_out: weight_data[grid.child_indices] = 0
+        # if we zero it out here, then we only have to zero out the weight!
+        masked_data = [self._get_data_from_grid(grid, field) * weight_data
+                       for field in fields]
         dl = 1.0
-        if fieldInfo.has_key(field) and fieldInfo[field].line_integral:
-            dl = just_one(grid['d%s' % axis_names[self.axis]])
-        dx = just_one(grid['d%s' % axis_names[x_dict[self.axis]]])
-        dy = just_one(grid['d%s' % axis_names[y_dict[self.axis]]])
-        full_proj = self.func(masked_data,axis=self.axis)*dl
-        weight_proj = self.func(weight_data,axis=self.axis)*dl
+        full_proj = [self.func(field,axis=self.axis) for field in masked_data]
+        weight_proj = self.func(weight_data,axis=self.axis)
         if self._check_region and not self.source._is_fully_enclosed(grid):
             used_data = self._get_points_in_region(grid)
             used_points = na.where(na.logical_or.reduce(used_data, self.axis))
@@ -860,15 +830,15 @@
         if zero_out:
             subgrid_mask = na.logical_and.reduce(grid.child_mask, self.axis).astype('int64')
         else:
-            subgrid_mask = na.ones(full_proj.shape, dtype='int64')
-        xind, yind = [arr[used_points].ravel() for arr in na.indices(full_proj.shape)]
+            subgrid_mask = na.ones(full_proj[0].shape, dtype='int64')
+        xind, yind = [arr[used_points].ravel() for arr in na.indices(full_proj[0].shape)]
         start_index = grid.get_global_startindex()
-        xpoints = xind + (start_index[x_dict[self.axis]])
-        ypoints = yind + (start_index[y_dict[self.axis]])
-        return [xpoints, ypoints,
-                full_proj[used_points].ravel(),
+        xpoints = (xind + (start_index[x_dict[self.axis]])).astype('int64')
+        ypoints = (yind + (start_index[y_dict[self.axis]])).astype('int64')
+        return ([xpoints, ypoints,
                 subgrid_mask[used_points].ravel(),
-                weight_proj[used_points].ravel()]
+                weight_proj[used_points].ravel()],
+                [data[used_points].ravel() for data in full_proj])
 
     def _get_points_in_region(self, grid):
         pointI = self.source._get_point_indices(grid, use_child_mask=False)
@@ -883,6 +853,7 @@
         else:
             bad_points = 1.0
         d = grid[field] * bad_points
+        if grid.id == 1: self._temp[grid.id] = d
         return d
 
 class Enzo3DData(EnzoData):

Modified: trunk/yt/lagos/BaseGridType.py
==============================================================================
--- trunk/yt/lagos/BaseGridType.py	(original)
+++ trunk/yt/lagos/BaseGridType.py	Mon Mar  3 13:06:39 2008
@@ -212,8 +212,8 @@
         cond2 = self.LeftEdge[x] <= RE[:,x]
         cond3 = self.RightEdge[y] >= LE[:,y]
         cond4 = self.LeftEdge[y] <= RE[:,y]
-        self.overlap_masks[axis]=na.logical_and(na.logical_and(cond1, cond2), \
-                                                na.logical_and(cond3, cond4))
+        return ((cond1 & cond2) & (cond3 & cond4))
+   
     def __repr__(self):
         return "Grid_%04i" % (self.id)
 

Modified: trunk/yt/lagos/DataReadingFuncs.py
==============================================================================
--- trunk/yt/lagos/DataReadingFuncs.py	(original)
+++ trunk/yt/lagos/DataReadingFuncs.py	Mon Mar  3 13:06:39 2008
@@ -64,7 +64,12 @@
     for set in sets:
         self[set] = self.readDataFast(set)
 
+import gc
 def readDataHDF5(self, field):
+    t = HDF5LightReader.ReadData(self.filename, "/%s" % field).swapaxes(0,2)
+    return t
+
+def tables_readDataHDF5(self, field):
     """
     Reads a field from an HDF5 file.  Should only be called as
     EnzoGridInstance.readData()
@@ -72,12 +77,14 @@
     @param field: field to read
     @type field: string
     """
-    f = tables.openFile(self.filename, nodeCacheSize=1)
-    t = f.getNode("/", field).read().astype("float64")
+    f = tables.openFile(self.filename)#, nodeCacheSize=1)
+    n = f.getNode("/", field)
+    t = n.read().astype("float64")
     try:
         t = t.swapaxes(0,2)
     except:
         pass
+    n.close()
     f.close()
     return t
 

Modified: trunk/yt/lagos/DerivedFields.py
==============================================================================
--- trunk/yt/lagos/DerivedFields.py	(original)
+++ trunk/yt/lagos/DerivedFields.py	Mon Mar  3 13:06:39 2008
@@ -177,7 +177,6 @@
         doesnt_have = []
         for p in self.prop:
             if not hasattr(data,p):
-                print type(data), p
                 doesnt_have.append(p)
         if len(doesnt_have) > 0:
             raise NeedsProperty(doesnt_have)
@@ -275,10 +274,9 @@
 add_field("OnesOverDx")
 
 def _Ones(field, data):
-    return na.ones(data["Density"].shape,
-                   dtype=data["Density"].dtype)
-add_field("Ones")
-add_field("CellsPerBin", function=_Ones)
+    return na.ones(data.ActiveDimensions, dtype='float64')
+add_field("Ones", validators=[ValidateSpatial(0)])
+add_field("CellsPerBin", function=_Ones, validators=[ValidateSpatial(0)])
 
 def _SoundSpeed(field, data):
     return ( data.pf["Gamma"]*data["Pressure"] / \

Added: trunk/yt/lagos/HDF5LightReader.c
==============================================================================
--- (empty file)
+++ trunk/yt/lagos/HDF5LightReader.c	Mon Mar  3 13:06:39 2008
@@ -0,0 +1,168 @@
+/************************************************************************
+* Copyright (C) 2007 Matthew Turk.  All Rights Reserved.
+*
+* This file is part of yt.
+*
+* yt is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 3 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program.  If not, see <http://www.gnu.org/licenses/>.
+*
+************************************************************************/
+
+
+//
+// HDF5_LightReader
+//   A module for light-weight reading of HDF5 files
+//
+
+#include "Python.h"
+
+#include <stdio.h>
+#include <math.h>
+#include <signal.h>
+#include <ctype.h>
+#include "H5LT.h"
+#include "hdf5.h"
+
+#include "numpy/ndarrayobject.h"
+
+
+static PyObject *_hdf5ReadError;
+
+static PyObject *
+Py_ReadHDF5DataSet(PyObject *obj, PyObject *args)
+{
+    char *filename, *nodename;
+
+    hsize_t *my_dims;
+    hid_t file_id;
+    herr_t my_error;
+    H5T_class_t class_id;
+    size_t type_size;
+    int my_typenum, my_rank, i, my_size;
+    void *my_data;
+    PyArrayObject *my_array;
+
+    if (!PyArg_ParseTuple(args, "ss",
+            &filename, &nodename))
+        return PyErr_Format(_hdf5ReadError,
+               "ReadHDF5DataSet: Invalid parameters.");
+
+    //char* filename = *ofilename;
+    //char* nodename = *onodename;
+
+    file_id = H5Fopen (filename, H5F_ACC_RDONLY, H5P_DEFAULT); 
+    
+    my_error = H5LTget_dataset_ndims ( file_id, nodename, &my_rank );
+    if(my_error) {
+        PyErr_Format(_hdf5ReadError,
+                 "ReadHDF5DataSet: Problem getting dataset info (%s, %s).",
+                                    filename, nodename);
+        goto _fail;
+    }
+
+    my_dims = malloc(sizeof(hsize_t) * my_rank);
+    my_error = H5LTget_dataset_info ( file_id, nodename,
+                my_dims, &class_id, &type_size );
+    if(my_error) {
+        PyErr_Format(_hdf5ReadError,
+                 "ReadHDF5DataSet: Problem getting dataset info (%s, %s).",
+                                    filename, nodename);
+        goto _fail;
+    }
+
+    my_size = 1;
+    npy_intp *dims = malloc(my_rank * sizeof(npy_intp));
+    for (i = 0; i < my_rank; i++) {
+      dims[i] = (npy_intp) my_dims[i];
+      my_size *= my_dims[i];
+    }
+
+    if (!(class_id == H5T_FLOAT)){
+      PyErr_Format(_hdf5ReadError,
+          "ReadHDF5DataSet: Unrecognized datatype, size %i.", type_size);
+      goto _fail;
+    }
+
+    /*
+    switch (type_size) {
+      case 4:
+        fprintf(stderr, "Reading (%i) %i\n", my_size, type_size);
+        my_typenum = NPY_FLOAT32;
+        H5LTread_dataset_float(file_id, nodename, my_data);
+        break;
+      case 8:
+        fprintf(stderr, "Reading (%i) %i\n", my_size, type_size);
+        my_typenum = NPY_FLOAT64;
+        H5LTread_dataset_double(file_id, nodename, my_data);
+        break;
+      default:
+        PyErr_Format(_hdf5ReadError,
+            "ReadHDF5DataSet: Unrecognized datatype, size %i.", type_size);
+        goto _fail;
+        break; //haha goto!
+    }
+    */
+    my_array = (PyArrayObject *) PyArray_SimpleNewFromDescr(my_rank, dims,
+                PyArray_DescrFromType(NPY_DOUBLE));
+
+    H5LTread_dataset_double(file_id, nodename, (void *) my_array->data);
+    H5Fclose(file_id);
+    /*
+    my_array = (PyArrayObject *) PyArray_SimpleNewFromData(my_rank, dims,
+                                    NPY_FLOAT64, (void *)my_data);
+    */
+
+    PyArray_UpdateFlags(my_array, NPY_OWNDATA | my_array->flags);
+    // 'N' does not increase the reference count
+    PyObject *return_value = Py_BuildValue("N", my_array);
+
+    free(dims);
+    free(my_dims);
+
+    return return_value;
+
+    _fail:
+      Py_XDECREF(my_array);
+      if(file_id) H5Fclose(file_id);
+      if(my_data) free(my_data);
+      if(my_dims) free(my_dims);
+      if(dims) free(dims);
+      return NULL;
+}
+
+static PyMethodDef _hdf5LightReaderMethods[] = {
+    {"ReadData", Py_ReadHDF5DataSet, METH_VARARGS},
+    {NULL, NULL} /* Sentinel */
+};
+
+/* platform independent*/
+#ifdef MS_WIN32
+__declspec(dllexport)
+#endif
+
+void initHDF5LightReader(void)
+{
+    PyObject *m, *d;
+    m = Py_InitModule("HDF5LightReader", _hdf5LightReaderMethods);
+    d = PyModule_GetDict(m);
+    _hdf5ReadError = PyErr_NewException("HDF5LightReader.ReadingError", NULL, NULL);
+    PyDict_SetItemString(d, "error", _hdf5ReadError);
+    import_array();
+}
+
+/*
+ * Local Variables:
+ * mode: C
+ * c-file-style: "python"
+ * End:
+ */

Modified: trunk/yt/lagos/PointCombine.c
==============================================================================
--- trunk/yt/lagos/PointCombine.c	(original)
+++ trunk/yt/lagos/PointCombine.c	Mon Mar  3 13:06:39 2008
@@ -44,438 +44,226 @@
 #define max(A,B) ((A) > (B) ? (A) : (B))
 #define min(A,B) ((A) < (B) ? (A) : (B))
 
-static PyObject *_combineError;
-
-static void
-RefineCoarseData(long fpoints, npy_int64 *finedata_x, npy_int64 *finedata_y, npy_float64 *finedata_vals, npy_float64 *finedata_wgt,
-                 long cpoints, npy_int64 *coarsedata_x, npy_int64 *coarsedata_y, npy_float64 *coarsedata_vals, npy_float64 *coarsedata_wgt,
-                 int refinementFactor, int *totalRefined)
-{
-    long fi, ci;
-
-    int flagged[cpoints], tr = 0;
-
-    long double rf = refinementFactor;
-
-    npy_int64 coarseCell_x, coarseCell_y;
-
-    for (ci=0; ci<cpoints; ci++) flagged[ci]=0;
-
-    for (fi = 0; fi < fpoints; fi++) {
-        coarseCell_x = floorl((finedata_x[fi])/rf);
-        coarseCell_y = floorl((finedata_y[fi])/rf);
-        for (ci = 0; ci < cpoints; ci++) {
-            if ((coarseCell_x == coarsedata_x[ci]) &&
-                (coarseCell_y == coarsedata_y[ci])) {
-                    tr += 1;
-                    finedata_vals[fi] = COMB(coarsedata_vals[ci], finedata_vals[fi]);
-                    finedata_wgt[fi]  = COMB(coarsedata_wgt[ci],  finedata_wgt[fi]);
-                    flagged[ci] = 1;
-                    break;  // Each fine cell maps to one and only one coarse
-                            // cell
-            }
-        }
-    }
-    *totalRefined = tr;
-
-    for (ci=0; ci<cpoints; ci++) {
-        if (flagged[ci]==1) {
-            coarsedata_x[ci]=-1;
-            coarsedata_y[ci]=-1;
-            coarsedata_vals[ci]=0.0;
-            coarsedata_wgt[ci]=0.0;
-        }
-    }
-}
+static PyObject *_combineGridsError;
 
 static PyObject *
-Py_RefineCoarseData(PyObject *obj, PyObject *args)
+Py_CombineGrids(PyObject *obj, PyObject *args)
 {
-    PyObject   *ofinedata_x, *ofinedata_y, *ofinedata_vals, *ofinedata_wgt,
-               *ocoarsedata_x, *ocoarsedata_y, *ocoarsedata_vals, *ocoarsedata_wgt,
-               *oreturn_index;
-    PyArrayObject   *finedata_x, *finedata_y, *finedata_vals, *finedata_wgt,
-	    *coarsedata_x, *coarsedata_y, *coarsedata_vals, *coarsedata_wgt;
-    int refinementFactor;
-
-    if (!PyArg_ParseTuple(args, "OOOOOOOOi",
-        &ofinedata_x, &ofinedata_y, &ofinedata_vals, &ofinedata_wgt,
-        &ocoarsedata_x, &ocoarsedata_y, &ocoarsedata_vals, &ocoarsedata_wgt,
-        &refinementFactor))
-        return PyErr_Format(_combineError,
-                    "CombineData: Invalid parameters.");
-
-    /* Align, Byteswap, Contiguous, Typeconvert */
-
-    finedata_x    = (PyArrayObject *) PyArray_FromAny(ofinedata_x   , PyArray_DescrFromType(NPY_INT64), 1, 0, NPY_ENSURECOPY | NPY_UPDATEIFCOPY, NULL);
-    finedata_y    = (PyArrayObject *) PyArray_FromAny(ofinedata_y   , PyArray_DescrFromType(NPY_INT64), 1, 0, NPY_ENSURECOPY | NPY_UPDATEIFCOPY, NULL);
-    finedata_vals = (PyArrayObject *) PyArray_FromAny(ofinedata_vals, PyArray_DescrFromType(NPY_FLOAT64), 1, 0, NPY_ENSURECOPY | NPY_UPDATEIFCOPY, NULL);
-    finedata_wgt  = (PyArrayObject *) PyArray_FromAny(ofinedata_wgt , PyArray_DescrFromType(NPY_FLOAT64), 1, 0, NPY_ENSURECOPY | NPY_UPDATEIFCOPY, NULL);
-
-    coarsedata_x    = (PyArrayObject *) PyArray_FromAny(ocoarsedata_x   , PyArray_DescrFromType(NPY_INT64), 1, 0, NPY_ENSURECOPY | NPY_UPDATEIFCOPY, NULL);
-    coarsedata_y    = (PyArrayObject *) PyArray_FromAny(ocoarsedata_y   , PyArray_DescrFromType(NPY_INT64), 1, 0, NPY_ENSURECOPY | NPY_UPDATEIFCOPY, NULL);
-    coarsedata_vals = (PyArrayObject *) PyArray_FromAny(ocoarsedata_vals, PyArray_DescrFromType(NPY_FLOAT64), 1, 0, NPY_ENSURECOPY | NPY_UPDATEIFCOPY, NULL);
-    coarsedata_wgt  = (PyArrayObject *) PyArray_FromAny(ocoarsedata_wgt , PyArray_DescrFromType(NPY_FLOAT64), 1, 0, NPY_ENSURECOPY | NPY_UPDATEIFCOPY, NULL);
-
-    if (!finedata_x || !finedata_y || !finedata_vals || !finedata_wgt
-     || !coarsedata_x || !coarsedata_y || !coarsedata_vals || !coarsedata_wgt) {
-        /*PyErr_Format( _combineError,
-                  "CombineData: error converting array inputs.");
-        */
-        if (!coarsedata_x)
-          PyErr_Format( _combineError,
-              "CombineData: error converting coarsedata_x");
-        if (!coarsedata_y)
-          PyErr_Format( _combineError,
-              "CombineData: error converting coarsedata_y");
-        if (!coarsedata_vals)
-          PyErr_Format( _combineError,
-              "CombineData: error converting coarsedata_vals");
-        if (!coarsedata_wgt)
-          PyErr_Format( _combineError,
-              "CombineData: error converting coarsedata_wgt");
-        if (!finedata_x)
-          PyErr_Format( _combineError,
-              "CombineData: error converting finedata_x");
-        if (!finedata_y)
-          PyErr_Format( _combineError,
-              "CombineData: error converting finedata_y");
-        if (!finedata_vals)
-          PyErr_Format( _combineError,
-              "CombineData: error converting finedata_vals");
-        if (!finedata_wgt)
-          PyErr_Format( _combineError,
-              "CombineData: error converting finedata_wgt");
-        goto _fail;
-    }
-
-/*
-    if (!NA_ShapeEqual(finedata_x, finedata_y)) {
-        PyErr_Format(_combineError,
-                 "CombineData: x and y finedata numarray need identitcal shapes.");
-        goto _fail;
-    }
-
-    if (!NA_ShapeEqual(finedata_x, finedata_vals)) {
-        PyErr_Format(_combineError,
-                 "CombineData: x and vals finedata numarray need identitcal shapes.");
-        goto _fail;
-    }
-
-    if (!NA_ShapeEqual(finedata_x, finedata_wgt)) {
-        PyErr_Format(_combineError,
-                 "CombineData: x and weight finedata numarray need identitcal shapes.");
-        goto _fail;
-    }
-
-    if (!NA_ShapeEqual(coarsedata_x, coarsedata_y)) {
-        PyErr_Format(_combineError,
-                 "CombineData: x and y coarsedata numarray need identitcal shapes.");
-        goto _fail;
-    }
-
-    if (!NA_ShapeEqual(coarsedata_x, coarsedata_vals)) {
-        PyErr_Format(_combineError,
-                 "CombineData: x and vals coarsedata numarray need identitcal shapes.");
-        goto _fail;
-    }
-
-    if (!NA_ShapeEqual(coarsedata_x, coarsedata_wgt)) {
-        PyErr_Format(_combineError,
-                 "CombineData: x and weight coarsedata numarray need identitcal shapes.");
-        goto _fail;
-    }
-*/
+    PyObject    *ogrid_src_x, *ogrid_src_y, *ogrid_src_vals,
+        *ogrid_src_mask, *ogrid_src_wgt;
+    PyObject    *ogrid_dst_x, *ogrid_dst_y, *ogrid_dst_vals,
+        *ogrid_dst_mask, *ogrid_dst_wgt;
+
+    PyArrayObject    *grid_src_x, *grid_src_y, **grid_src_vals,
+            *grid_src_mask, *grid_src_wgt;
+    PyArrayObject    *grid_dst_x, *grid_dst_y, **grid_dst_vals,
+            *grid_dst_mask, *grid_dst_wgt;
 
-    int totalRefined;
+    int NumArrays, src_len, dst_len, refinement_factor;
 
-    RefineCoarseData(finedata_vals->dimensions[0],
-                     (npy_int64 *) finedata_x->data,
-                     (npy_int64 *) finedata_y->data,
-                     (npy_float64 *) finedata_vals->data,
-                     (npy_float64 *) finedata_wgt->data,
-                     coarsedata_vals->dimensions[0],
-                     (npy_int64 *) coarsedata_x->data,
-                     (npy_int64 *) coarsedata_y->data,
-                     (npy_float64 *) coarsedata_vals->data,
-                     (npy_float64 *) coarsedata_wgt->data,
-                     refinementFactor, &totalRefined);
-
-    Py_XDECREF(finedata_x);
-    Py_XDECREF(finedata_y);
-    Py_XDECREF(finedata_vals);
-    Py_XDECREF(finedata_wgt);
-    Py_XDECREF(coarsedata_x);
-    Py_XDECREF(coarsedata_y);
-    Py_XDECREF(coarsedata_vals);
-    Py_XDECREF(coarsedata_wgt);
-
-    /* Align, Byteswap, Contiguous, Typeconvert */
-    oreturn_index = PyInt_FromLong((long)totalRefined);
-    return oreturn_index;
-
-  _fail:
-    Py_XDECREF(finedata_x);
-    Py_XDECREF(finedata_y);
-    Py_XDECREF(finedata_vals);
-    Py_XDECREF(finedata_wgt);
-    Py_XDECREF(coarsedata_x);
-    Py_XDECREF(coarsedata_y);
-    Py_XDECREF(coarsedata_vals);
-    Py_XDECREF(coarsedata_wgt);
-
-    return NULL;
-}
-
-static void
-CombineData(long dpoints, npy_int64 *alldata_x, npy_int64 *alldata_y, npy_float64 *alldata_vals, npy_int64 *alldata_mask, npy_float64 *alldata_wgt,
-	    long gpoints, npy_int64 *griddata_x, npy_int64 *griddata_y, npy_float64 *griddata_vals, npy_int64 *griddata_mask, npy_float64 *griddata_wgt,
-                 int *lastIndex)
-{
-    long di, gi;
-
-    int appendData;
-    int myIndex = *lastIndex;
-    myIndex = 0;
-
-    //for (gi=0; gi<gpoints; gi++) flagged[gi]=0;
-
-    for (di = 0; di < dpoints; di++) {
-        appendData = 0;
-        if (alldata_x[di] < 0) continue;
-        for (gi = 0; gi < gpoints; gi++) {
-            if ((alldata_x[di] == griddata_x[gi]) &&
-                (alldata_y[di] == griddata_y[gi])) {
-                    alldata_vals[di] = COMB(alldata_vals[di], griddata_vals[gi]);
-                    alldata_wgt[di]  = COMB(alldata_wgt[di], griddata_wgt[gi]);
-                    alldata_mask[di] = (alldata_mask[di] && griddata_mask[gi]);
-                    griddata_x[gi] = -1;
-                    appendData = 0;
-                    myIndex++;
-                    break; // We map to one alldata point AT MOST
+    if (!PyArg_ParseTuple(args, "OOOOOOOOOOi",
+            &ogrid_src_x, &ogrid_src_y, 
+        &ogrid_src_mask, &ogrid_src_wgt, &ogrid_src_vals,
+            &ogrid_dst_x, &ogrid_dst_y,
+        &ogrid_dst_mask, &ogrid_dst_wgt, &ogrid_dst_vals,
+        &refinement_factor))
+    return PyErr_Format(_combineGridsError,
+            "CombineGrids: Invalid parameters.");
+
+    /* First the regular source arrays */
+
+    grid_src_x    = (PyArrayObject *) PyArray_FromAny(ogrid_src_x,
+                    PyArray_DescrFromType(NPY_INT64), 1, 0,
+                    NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
+    src_len = PyArray_SIZE(grid_src_x);
+
+    grid_src_y    = (PyArrayObject *) PyArray_FromAny(ogrid_src_y,
+                    PyArray_DescrFromType(NPY_INT64), 1, 0,
+                    NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
+    if(PyArray_SIZE(grid_src_y) != src_len) {
+    PyErr_Format(_combineGridsError,
+             "CombineGrids: src_x and src_y must be the same shape.");
+    goto _fail;
+    }
+
+    grid_src_mask = (PyArrayObject *) PyArray_FromAny(ogrid_src_mask,
+                    PyArray_DescrFromType(NPY_INT64), 1, 0,
+                    NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
+    if(PyArray_SIZE(grid_src_mask) != src_len) {
+    PyErr_Format(_combineGridsError,
+             "CombineGrids: src_x and src_mask must be the same shape.");
+    goto _fail;
+    }
+
+    grid_src_wgt  = (PyArrayObject *) PyArray_FromAny(ogrid_src_wgt,
+                    PyArray_DescrFromType(NPY_FLOAT64), 1, 0,
+                    NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
+    if(PyArray_SIZE(grid_src_wgt) != src_len) {
+    PyErr_Format(_combineGridsError,
+             "CombineGrids: src_x and src_wgt must be the same shape.");
+    goto _fail;
+    }
+
+    grid_dst_x    = (PyArrayObject *) PyArray_FromAny(ogrid_dst_x,
+                    PyArray_DescrFromType(NPY_INT64), 1, 0,
+                    NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
+    dst_len = PyArray_SIZE(grid_dst_x);
+
+    grid_dst_y    = (PyArrayObject *) PyArray_FromAny(ogrid_dst_y,
+                    PyArray_DescrFromType(NPY_INT64), 1, 0,
+                    NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
+    if(PyArray_SIZE(grid_dst_y) != dst_len) {
+    PyErr_Format(_combineGridsError,
+             "CombineGrids: dst_x and dst_y must be the same shape.");
+    goto _fail;
+    }
+
+    grid_dst_mask = (PyArrayObject *) PyArray_FromAny(ogrid_dst_mask,
+                    PyArray_DescrFromType(NPY_INT64), 1, 0,
+                    NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
+    if(PyArray_SIZE(grid_dst_mask) != dst_len) {
+    PyErr_Format(_combineGridsError,
+             "CombineGrids: dst_x and dst_mask must be the same shape.");
+    goto _fail;
+    }
+
+    grid_dst_wgt  = (PyArrayObject *) PyArray_FromAny(ogrid_dst_wgt,
+                    PyArray_DescrFromType(NPY_FLOAT64), 1, 0,
+                    NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
+    if(PyArray_SIZE(grid_dst_wgt) != dst_len) {
+    PyErr_Format(_combineGridsError,
+             "CombineGrids: dst_x and dst_wgt must be the same shape.");
+    goto _fail;
+    }
+
+    /* Now we do our lists of values */
+    NumArrays = PySequence_Length(ogrid_src_vals);
+    if (NumArrays < 1) {
+    PyErr_Format(_combineGridsError,
+             "CombineGrids: You have to pass me lists of things.");
+    goto _fail;
+    }
+    if (!(PySequence_Length(ogrid_dst_vals) == NumArrays)) {
+    PyErr_Format(_combineGridsError,
+             "CombineGrids: Sorry, but your lists of values are different lengths.");
+    goto _fail;
+    }
+
+    grid_src_vals = malloc(NumArrays * sizeof(PyArrayObject*));
+    grid_dst_vals = malloc(NumArrays * sizeof(PyArrayObject*));
+    npy_float64 **src_vals = malloc(NumArrays * sizeof(npy_float64*));
+    npy_float64 **dst_vals = malloc(NumArrays * sizeof(npy_float64*));
+    PyObject *temp_object;
+    int i;
+    for (i = 0; i < NumArrays; i++) {
+      temp_object = PySequence_GetItem(ogrid_src_vals, i);
+      grid_src_vals[i] = (PyArrayObject *) PyArray_FromAny(
+          temp_object,
+          PyArray_DescrFromType(NPY_FLOAT64), 1, 0,
+          NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
+      src_vals[i] = (npy_float64 *) grid_src_vals[i]->data;
+      Py_DECREF(temp_object);
+
+      temp_object = PySequence_GetItem(ogrid_dst_vals, i);
+      grid_dst_vals[i] = (PyArrayObject *) PyArray_FromAny(
+          temp_object,
+          PyArray_DescrFromType(NPY_FLOAT64), 1, 0,
+          NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
+      dst_vals[i] = (npy_float64 *) grid_dst_vals[i]->data;
+      Py_DECREF(temp_object);
+    }
+
+    /* Now we're all set to call our sub-function. */
+
+    npy_int64     *src_x    = (npy_int64 *) grid_src_x->data;
+    npy_int64     *src_y    = (npy_int64 *) grid_src_y->data;
+    npy_float64 *src_wgt  = (npy_float64 *) grid_src_wgt->data;
+    npy_int64     *src_mask = (npy_int64 *) grid_src_mask->data;
+
+    npy_int64     *dst_x    = (npy_int64 *) grid_dst_x->data;
+    npy_int64     *dst_y    = (npy_int64 *) grid_dst_y->data;
+    npy_float64 *dst_wgt  = (npy_float64 *) grid_dst_wgt->data;
+    npy_int64     *dst_mask = (npy_int64 *) grid_dst_mask->data;
+
+    int si, di, x_off, y_off;
+    npy_int64  fine_x, fine_y, init_x, init_y;
+    int num_found = 0;
+
+    for (si = 0; si < src_len; si++) {
+      if (src_x[si] < 0) continue;
+      init_x = refinement_factor * src_x[si];
+      init_y = refinement_factor * src_y[si];
+      for (x_off = 0; x_off < refinement_factor; x_off++) {
+        for(y_off = 0; y_off < refinement_factor; y_off++) {
+          fine_x = init_x + x_off;
+          fine_y = init_y + y_off;
+          for (di = 0; di < dst_len; di++) {
+            if (dst_x[di] < 0) continue;
+            if ((fine_x == dst_x[di]) &&
+                (fine_y == dst_y[di])) {
+              num_found++;
+              dst_wgt[di] += src_wgt[di];
+              dst_mask[di] = ((src_mask[si] && dst_mask[di]) ||
+                  ((refinement_factor != 1) && (dst_mask[di])));
+              // So if they are on the same level, then take the logical and
+              // otherwise, set it to the destination mask
+              src_x[si] = -1;
+              for (i = 0; i < NumArrays; i++) {
+                dst_vals[i][di] += src_vals[i][si];
+              }
+              if (refinement_factor == 1) break;
             }
+          }
         }
-        if (appendData) {
-            alldata_x[myIndex] = griddata_x[gi];
-            alldata_y[myIndex] = griddata_y[gi];
-            alldata_mask[myIndex] = griddata_mask[gi];
-            alldata_vals[myIndex] = griddata_vals[gi];
-            alldata_wgt[myIndex] = griddata_wgt[gi];
-            myIndex++;
-        }
+      }
     }
-    *lastIndex = myIndex;
-}
-
-static PyObject *
-Py_CombineData(PyObject *obj, PyObject *args)
-{
-    PyObject   *oalldata_x, *oalldata_y, *oalldata_vals, *oalldata_mask, *oalldata_wgt,
-    	       *ogriddata_x, *ogriddata_y, *ogriddata_vals, *ogriddata_mask, *ogriddata_wgt,
-               *oreturn_index;
-    PyArrayObject   *alldata_x, *alldata_y, *alldata_vals, *alldata_mask, *alldata_wgt,
-	    *griddata_x, *griddata_y, *griddata_vals, *griddata_mask, *griddata_wgt;
-    int lastIndex;
-
-    if (!PyArg_ParseTuple(args, "OOOOOOOOOOi",
-        &oalldata_x, &oalldata_y, &oalldata_vals, &oalldata_mask, &oalldata_wgt,
-        &ogriddata_x, &ogriddata_y, &ogriddata_vals, &ogriddata_mask, &ogriddata_wgt,
-        &lastIndex))
-        return PyErr_Format(_combineError,
-                    "CombineData: Invalid parameters.");
 
-    /* Align, Byteswap, Contiguous, Typeconvert */
-    alldata_x    = (PyArrayObject *) PyArray_FromAny(oalldata_x   , PyArray_DescrFromType(NPY_INT64), 1, 0, NPY_ENSURECOPY | NPY_UPDATEIFCOPY, NULL);
-    alldata_y    = (PyArrayObject *) PyArray_FromAny(oalldata_y   , PyArray_DescrFromType(NPY_INT64), 1, 0, NPY_ENSURECOPY | NPY_UPDATEIFCOPY, NULL);
-    alldata_vals = (PyArrayObject *) PyArray_FromAny(oalldata_vals, PyArray_DescrFromType(NPY_FLOAT64), 1, 0, NPY_ENSURECOPY | NPY_UPDATEIFCOPY, NULL);
-    alldata_wgt  = (PyArrayObject *) PyArray_FromAny(oalldata_wgt , PyArray_DescrFromType(NPY_FLOAT64), 1, 0, NPY_ENSURECOPY | NPY_UPDATEIFCOPY, NULL);
-    alldata_mask = (PyArrayObject *) PyArray_FromAny(oalldata_mask, PyArray_DescrFromType(NPY_INT64), 1, 0, NPY_ENSURECOPY | NPY_UPDATEIFCOPY, NULL);
-
-    griddata_x    = (PyArrayObject *) PyArray_FromAny(ogriddata_x   , PyArray_DescrFromType(NPY_INT64), 1, 0, NPY_ENSURECOPY | NPY_UPDATEIFCOPY, NULL);
-    griddata_y    = (PyArrayObject *) PyArray_FromAny(ogriddata_y   , PyArray_DescrFromType(NPY_INT64), 1, 0, NPY_ENSURECOPY | NPY_UPDATEIFCOPY, NULL);
-    griddata_vals = (PyArrayObject *) PyArray_FromAny(ogriddata_vals, PyArray_DescrFromType(NPY_FLOAT64), 1, 0, NPY_ENSURECOPY | NPY_UPDATEIFCOPY, NULL);
-    griddata_wgt  = (PyArrayObject *) PyArray_FromAny(ogriddata_wgt , PyArray_DescrFromType(NPY_FLOAT64), 1, 0, NPY_ENSURECOPY | NPY_UPDATEIFCOPY, NULL);
-    griddata_mask = (PyArrayObject *) PyArray_FromAny(ogriddata_mask, PyArray_DescrFromType(NPY_INT64), 1, 0, NPY_ENSURECOPY | NPY_UPDATEIFCOPY, NULL);
-
-    if (!alldata_x || !alldata_y || !alldata_vals || !alldata_wgt || !alldata_mask
-     || !griddata_x || !griddata_y || !griddata_vals || !griddata_wgt || !griddata_mask) {
-        if (!alldata_x)
-          PyErr_Format( _combineError,
-              "CombineData: error converting alldata_x");
-        if (!alldata_y)
-          PyErr_Format( _combineError,
-              "CombineData: error converting alldata_y");
-        if (!alldata_vals)
-          PyErr_Format( _combineError,
-              "CombineData: error converting alldata_vals");
-        if (!alldata_wgt)
-          PyErr_Format( _combineError,
-              "CombineData: error converting alldata_wgt");
-        if (!alldata_mask)
-          PyErr_Format( _combineError,
-              "CombineData: error converting alldata_mask");
-        if (!griddata_x)
-          PyErr_Format( _combineError,
-              "CombineData: error converting griddata_x");
-        if (!griddata_y)
-          PyErr_Format( _combineError,
-              "CombineData: error converting griddata_y");
-        if (!griddata_vals)
-          PyErr_Format( _combineError,
-              "CombineData: error converting griddata_vals");
-        if (!griddata_wgt)
-          PyErr_Format( _combineError,
-              "CombineData: error converting griddata_wgt");
-        if (!griddata_mask)
-          PyErr_Format( _combineError,
-              "CombineData: error converting griddata_mask");
-        goto _fail;
-    }
-
-
-/*
-    if (!NA_ShapeEqual(alldata_x, alldata_y)) {
-        PyErr_Format(_combineError,
-                 "CombineData: x and y alldata numarray need identitcal shapes.");
-        goto _fail;
-    }
-
-    if (!NA_ShapeEqual(alldata_x, alldata_vals)) {
-        PyErr_Format(_combineError,
-                 "CombineData: x and vals alldata numarray need identitcal shapes.");
-        goto _fail;
+    Py_DECREF(grid_src_x);
+    Py_DECREF(grid_src_y);
+    Py_DECREF(grid_src_mask);
+    Py_DECREF(grid_src_wgt);
+
+    Py_DECREF(grid_dst_x);
+    Py_DECREF(grid_dst_y);
+    Py_DECREF(grid_dst_mask);
+    Py_DECREF(grid_dst_wgt);
+
+    if (NumArrays > 0){
+      for (i = 0; i < NumArrays; i++) {
+        Py_DECREF(grid_src_vals[i]);
+        Py_DECREF(grid_dst_vals[i]);
+      }
+    }
+
+    free(grid_src_vals);
+    free(grid_dst_vals);
+    free(src_vals);
+    free(dst_vals);
+
+    PyObject *onum_found = PyInt_FromLong((long)num_found);
+    return onum_found;
+
+_fail:
+    Py_XDECREF(grid_src_x);
+    Py_XDECREF(grid_src_y);
+    Py_XDECREF(grid_src_wgt);
+    Py_XDECREF(grid_src_mask);
+
+    Py_XDECREF(grid_dst_x);
+    Py_XDECREF(grid_dst_y);
+    Py_XDECREF(grid_dst_wgt);
+    Py_XDECREF(grid_dst_mask);
+    if (NumArrays > 0){
+      for (i = 0; i < NumArrays; i++) {
+        Py_XDECREF(grid_src_vals[i]);
+        Py_XDECREF(grid_dst_vals[i]);
+      }
     }
-
-    if (!NA_ShapeEqual(alldata_x, alldata_wgt)) {
-        PyErr_Format(_combineError,
-                 "CombineData: x and weight alldata numarray need identitcal shapes.");
-        goto _fail;
-    }
-
-    if (!NA_ShapeEqual(griddata_x, griddata_y)) {
-        PyErr_Format(_combineError,
-                 "CombineData: x and y griddata numarray need identitcal shapes.");
-        goto _fail;
-    }
-
-    if (!NA_ShapeEqual(griddata_x, griddata_vals)) {
-        PyErr_Format(_combineError,
-                 "CombineData: x and vals griddata numarray need identitcal shapes.");
-        goto _fail;
-    }
-
-    if (!NA_ShapeEqual(griddata_wgt, griddata_vals)) {
-        PyErr_Format(_combineError,
-                 "CombineData: weight and vals griddata numarray need identitcal shapes.");
-        goto _fail;
-    }*/
-
-    CombineData(alldata_vals->dimensions[0],
-                (npy_int64 *) alldata_x->data,
-                (npy_int64 *) alldata_y->data,
-                (npy_float64 *) alldata_vals->data,
-                (npy_int64 *) alldata_mask->data,
-                (npy_float64 *) alldata_wgt->data,
-                griddata_vals->dimensions[0],
-                (npy_int64 *) griddata_x->data,
-                (npy_int64 *) griddata_y->data,
-                (npy_float64 *) griddata_vals->data,
-                (npy_int64 *) griddata_mask->data,
-                (npy_float64 *) griddata_wgt->data,
-                &lastIndex);
-
-    Py_XDECREF(alldata_x);
-    Py_XDECREF(alldata_y);
-    Py_XDECREF(alldata_vals);
-    Py_XDECREF(alldata_mask);
-    Py_XDECREF(alldata_wgt);
-    Py_XDECREF(griddata_x);
-    Py_XDECREF(griddata_y);
-    Py_XDECREF(griddata_vals);
-    Py_XDECREF(griddata_mask);
-    Py_XDECREF(griddata_wgt);
-
-    /* Align, Byteswap, Contiguous, Typeconvert */
-    oreturn_index = PyInt_FromLong((long)lastIndex);
-    return oreturn_index;
-
-  _fail:
-    Py_XDECREF(alldata_x);
-    Py_XDECREF(alldata_y);
-    Py_XDECREF(alldata_vals);
-    Py_XDECREF(alldata_wgt);
-    Py_XDECREF(griddata_x);
-    Py_XDECREF(griddata_y);
-    Py_XDECREF(griddata_vals);
-    Py_XDECREF(griddata_wgt);
-
     return NULL;
-}
 
-static void
-FindUpper(long ipoints, npy_float64 *input_axis, long vpoints, npy_float64 *wanted_vals,
-          npy_int64 *upper_inds)
-{
-    npy_int64 i;
-    npy_int64 v;
-    for (v = 0 ; v < vpoints ; v++)
-        for (i = 0 ; i < ipoints ; i++)
-            if (input_axis[i] >= wanted_vals[v]) {
-                upper_inds[v] = i;
-                break;
-            }
-    return;
 }
 
-static PyObject *
-Py_FindUpper(PyObject *obj, PyObject *args)
-{
-    PyObject   *oinput_axis, *odesired_vals, *ooutput_ind;
-    PyArrayObject   *input_axis, *desired_vals, *output_ind;
-
-    if (!PyArg_ParseTuple(args, "OOO",
-        &oinput_axis, &odesired_vals, &ooutput_ind))
-        return PyErr_Format(_combineError,
-                    "FindUpper: Invalid parameters.");
-
-    /* Align, Byteswap, Contiguous, Typeconvert */
-    input_axis    =  (PyArrayObject *) PyArray_FromAny(oinput_axis   , PyArray_DescrFromType(NPY_FLOAT64), 1, 0, NPY_ENSURECOPY | NPY_UPDATEIFCOPY, NULL );
-    desired_vals  =  (PyArrayObject *) PyArray_FromAny(odesired_vals , PyArray_DescrFromType(NPY_FLOAT64), 1, 0, NPY_ENSURECOPY | NPY_UPDATEIFCOPY, NULL );
-    output_ind    =  (PyArrayObject *) PyArray_FromAny(ooutput_ind   ,   PyArray_DescrFromType(NPY_INT64), 1, 0, NPY_ENSURECOPY | NPY_UPDATEIFCOPY, NULL);
-
-    if (!input_axis || !desired_vals) {
-        PyErr_Format( _combineError,
-                  "FindUpper: error converting array inputs.");
-        goto _fail;
-    }
-
-/*
-    if (!NA_ShapeEqual(output_ind, desired_vals)) {
-        PyErr_Format(_combineError,
-                 "FindUpper: vals and output need identical shapes.");
-        goto _fail;
-    }
-*/
-    FindUpper(input_axis->dimensions[0],
-              (npy_float64 *) input_axis->data,
-              desired_vals->dimensions[0],
-              (npy_float64 *) desired_vals->data,
-              (npy_int64 *) output_ind->data);
-
-    Py_XDECREF(input_axis);
-    Py_XDECREF(desired_vals);
-    Py_XDECREF(output_ind);
-
-    /* Align, Byteswap, Contiguous, Typeconvert */
-    return Py_None;
-
-  _fail:
-    Py_XDECREF(input_axis);
-    Py_XDECREF(desired_vals);
-    Py_XDECREF(output_ind);
-
-    return NULL;
-}
+static PyObject *_interpolateError;
 
 static void
 Interpolate(long num_axis_points, npy_float64 *axis, PyArrayObject* table,
@@ -524,7 +312,7 @@
 
     if (!PyArg_ParseTuple(args, "OOOOO",
         &oaxis, &otable, &odesired, &ooutputvals, &ocolumns))
-        return PyErr_Format(_combineError,
+        return PyErr_Format(_interpolateError,
                     "Interpolate: Invalid parameters.");
 
     /* Align, Byteswap, Contiguous, Typeconvert */
@@ -535,13 +323,13 @@
     columns       =  (PyArrayObject *) PyArray_FromAny(ocolumns      ,   PyArray_DescrFromType(NPY_INT32), 1, 0, NPY_ENSURECOPY | NPY_UPDATEIFCOPY, NULL );
 
     if (!axis || !table || !desired || !outputvals || !columns) {
-        PyErr_Format( _combineError,
+        PyErr_Format(_interpolateError,
                   "Interpolate: error converting array inputs.");
         goto _fail;
     }
 
     if (columns->dimensions[0] != outputvals->dimensions[1]) {
-        PyErr_Format(_combineError,
+        PyErr_Format(_interpolateError,
                  "Interpolate: number of columns requested must match number "
                  "of columns in output buffer. %i", (int) columns->dimensions[0]);
         goto _fail;
@@ -573,127 +361,9 @@
     return NULL;
 }
 
-static void
-BinProfile(long num_bins, npy_int64 *binindices, npy_float64 *profilevalues,
-           long num_elements, npy_float64 *fieldvalues, npy_float64 *weightvalues)
-{
-    // We are going to assume -- making an ass out of you and me -- that the
-    // values of the profile are already zero.
-
-    npy_float64 weights[num_bins];
-    npy_float64 myweight;
-    npy_int64 mybin = -1;
-    int bin;
-    int element;
-
-    for (bin = 0; bin < num_bins ; bin++) {
-        weights[bin] = 0;
-    }
-
-    if (weightvalues[0] != -999) {
-        for (element = 0; element < num_elements ; element++) {
-            mybin = binindices[element];
-            myweight = weightvalues[mybin];
-            profilevalues[mybin] += myweight*fieldvalues[element];
-            weights[mybin] += myweight;
-        }
-        for (bin = 0; bin < num_bins ; bin++) {
-            profilevalues[bin] = profilevalues[bin] / weights[bin];
-        }
-    } else {
-        for (element = 0; element < num_elements ; element++) {
-            mybin = binindices[element];
-            myweight = weightvalues[mybin];
-            profilevalues[mybin] += fieldvalues[element];
-        }
-        for (bin = 1; bin < num_bins ; bin++) {
-            profilevalues[bin] += profilevalues[bin-1];
-            //printf("ProfileValues: %i\t%0.4e\n", bin, profilevalues[bin]);
-        }
-    }
-
-    // Okay, and we're done.
-
-}
-
-static PyObject *
-Py_BinProfile(PyObject *obj, PyObject *args)
-{
-    PyObject   *ofield, *obinindices, *oprofile, *oweightfield;
-    PyArrayObject   *field, *binindices, *profile, *weightfield;
-
-    if (!PyArg_ParseTuple(args, "OOOO",
-                &ofield, &obinindices, &oprofile, &oweightfield))
-        return PyErr_Format(_combineError,
-                    "Interpolate: Invalid parameters.");
-
-    /* Align, Byteswap, Contiguous, Typeconvert */
-    field       =  (PyArrayObject *) PyArray_FromAny(ofield         , PyArray_DescrFromType(NPY_FLOAT64), 1, 0, NPY_ENSURECOPY | NPY_UPDATEIFCOPY, NULL);
-    binindices  =  (PyArrayObject *) PyArray_FromAny(obinindices    , PyArray_DescrFromType(NPY_INT64)  , 1, 0, NPY_ENSURECOPY | NPY_UPDATEIFCOPY, NULL);
-    profile     =  (PyArrayObject *) PyArray_FromAny(oprofile       , PyArray_DescrFromType(NPY_FLOAT64), 1, 0, NPY_ENSURECOPY | NPY_UPDATEIFCOPY, NULL);
-    weightfield =  (PyArrayObject *) PyArray_FromAny(oweightfield   , PyArray_DescrFromType(NPY_FLOAT64), 1, 0, NPY_ENSURECOPY | NPY_UPDATEIFCOPY, NULL);
-
-    if (!field){
-        PyErr_Format( _combineError,
-                  "BinProfile: error converting array inputs. (field)");
-        goto _fail;
-    }
-
-    if (!binindices){
-        PyErr_Format( _combineError,
-                  "BinProfile: error converting array inputs. (binindices)");
-        goto _fail;
-    }
-
-    if (!profile ){
-        PyErr_Format( _combineError,
-                  "BinProfile: error converting array inputs. (profile)");
-        goto _fail;
-    }
-
-    if (!weightfield) {
-        PyErr_Format( _combineError,
-                  "BinProfile: error converting array inputs. (weightfield)");
-        goto _fail;
-    }
-
-    if (field->dimensions[0] != binindices->dimensions[0]) {
-        PyErr_Format(_combineError,
-                 "BinProfile: number of field values must match number of "
-                  "indices.  %i, %i", (int) field->dimensions[0],
-                                      (int) binindices->dimensions[0] );
-        goto _fail;
-    }
-
-    BinProfile(profile->dimensions[0],
-               (npy_int64 *) binindices->data,
-               (npy_float64 *) profile->data,
-               field->dimensions[0],
-               (npy_float64 *) field->data,
-               (npy_float64 *) weightfield->data);
-
-    Py_XDECREF(field);
-    Py_XDECREF(binindices);
-    Py_XDECREF(profile);
-    Py_XDECREF(weightfield);
-
-    return Py_None;
-
-  _fail:
-    Py_XDECREF(field);
-    Py_XDECREF(binindices);
-    Py_XDECREF(profile);
-    Py_XDECREF(weightfield);
-
-    return NULL;
-}
-
 static PyMethodDef _combineMethods[] = {
-    {"RefineCoarseData", Py_RefineCoarseData, METH_VARARGS},
-    {"CombineData", Py_CombineData, METH_VARARGS},
-    {"FindUpper", Py_FindUpper, METH_VARARGS},
+    {"CombineGrids", Py_CombineGrids, METH_VARARGS},
     {"Interpolate", Py_Interpolate, METH_VARARGS},
-    {"BinProfile", Py_BinProfile, METH_VARARGS},
     {NULL, NULL} /* Sentinel */
 };
 
@@ -707,8 +377,10 @@
     PyObject *m, *d;
     m = Py_InitModule("PointCombine", _combineMethods);
     d = PyModule_GetDict(m);
-    _combineError = PyErr_NewException("PointCombine.error", NULL, NULL);
-    PyDict_SetItemString(d, "error", _combineError);
+    _combineGridsError = PyErr_NewException("PointCombine.CombineGridsError", NULL, NULL);
+    PyDict_SetItemString(d, "error", _combineGridsError);
+    _interpolateError = PyErr_NewException("PointCombine.InterpolateError", NULL, NULL);
+    PyDict_SetItemString(d, "error", _interpolateError);
     import_array();
 }
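
With this change the module's method table shrinks to CombineGrids and Interpolate, and each entry point now gets its own exception object; both are still registered on the module dict under the key "error".  Purely as an illustrative sketch -- not part of this changeset -- here is roughly how the surviving Interpolate entry point is driven from Python.  The array shapes, the `na` alias for numpy, and the sample values are assumptions inferred from the PyArg_ParseTuple/PyArray_FromAny conversions above:

    import numpy as na
    from yt.lagos import PointCombine

    # Shapes inferred from Py_Interpolate's conversions (assumptions):
    #   axis    (N,)   float64 lookup axis
    #   table   (N, M) float64 tabulated columns
    #   desired (K,)   float64 values to look up along axis
    #   output  (K, C) float64 buffer, filled via the UPDATEIFCOPY write-back
    #   columns (C,)   int32 column indices; C must equal output.shape[1]
    axis    = na.linspace(0.0, 1.0, 64)
    table   = na.random.random((64, 3))
    desired = na.array([0.1, 0.5, 0.9], dtype='float64')
    output  = na.zeros((3, 2), dtype='float64')
    columns = na.array([0, 2], dtype='int32')

    try:
        PointCombine.Interpolate(axis, table, desired, output, columns)
    except PointCombine.error, e:
        print "Interpolate raised:", e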
 

Modified: trunk/yt/lagos/__init__.py
==============================================================================
--- trunk/yt/lagos/__init__.py	(original)
+++ trunk/yt/lagos/__init__.py	Mon Mar  3 13:06:39 2008
@@ -65,6 +65,7 @@
 # Now we import all the subfiles
 
 import PointCombine
+import HDF5LightReader
 from WeaveStrings import *
 from EnzoDefs import *
 from DerivedFields import *
@@ -91,4 +92,4 @@
     # We assume that it is with respect to the $HOME/.yt directory
     execfile(os.path.expanduser("~/.yt/%s" % my_plugin_name))
 
-log_fields = [] # @todo: GET RID OF THIS
\ No newline at end of file
+log_fields = [] # @todo: GET RID OF THIS
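
Note that the new extension is imported unconditionally, so lagos will fail to import wherever HDF5LightReader did not build (see the setup.py hunk below).  A purely hypothetical guarded import, not something this commit does, would look like:

    try:
        import HDF5LightReader
    except ImportError:
        # Callers would then need to check for None and stay on the
        # existing PyTables-based reading paths.
        HDF5LightReader = None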

Modified: trunk/yt/lagos/setup.py
==============================================================================
--- trunk/yt/lagos/setup.py	(original)
+++ trunk/yt/lagos/setup.py	Mon Mar  3 13:06:39 2008
@@ -6,8 +6,18 @@
     config = Configuration('lagos',parent_package,top_path)
     config.make_config_py() # installs __config__.py
     config.add_extension("PointCombine", "yt/lagos/PointCombine.c", libraries=["m"])
-    sys.argv.extend(["config_fc","--f77flags","'-Dr16 -ffixed-line-length-none -fno-second-underscore -DPYFORT -DNOMETALS -ggdb'"])
-    if os.environ.has_key("YT_FORT"):
+    if not os.environ.has_key("HDF5_INSTALL"):
+        print "You need to set the environment variable HDF5_INSTALL to point to your HDF5 base"
+        #raise KeyError("HDF5_INSTALL")
+        sys.exit(1)
+    H5dir = os.environ["HDF5_INSTALL"]
+    include_dirs=[os.path.join(H5dir,"include")]
+    library_dirs=[os.path.join(H5dir,"lib")]
+    config.add_extension("HDF5LightReader", "yt/lagos/HDF5LightReader.c",
+                         libraries=["m","hdf5_hl","hdf5"],
+                         library_dirs=library_dirs, include_dirs=include_dirs)
+    sys.argv.extend(["config_fc","--f77flags","'-Dr16 -ffixed-line-length-none -fno-second-underscore -DPYFORT -DNOMETALS -ggdb -O0'"])
+    if 0:
         config.add_extension("EnzoFortranRoutines", \
                             ["yt/lagos/solve_rate_cool.pyf", "yt/lagos/f_src/*.F"])
     return config
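
Building now requires HDF5_INSTALL to point at an HDF5 prefix containing include/ and lib/ directories with hdf5 and hdf5_hl.  A small illustrative pre-flight check -- not part of the commit, and the example path and build command are only assumptions about a standard layout:

    import os, sys

    H5dir = os.environ.get("HDF5_INSTALL")
    if H5dir is None:
        print "Set HDF5_INSTALL to your HDF5 base directory, e.g.:"
        print "    export HDF5_INSTALL=/usr/local/hdf5 ; python setup.py build"
        sys.exit(1)
    for sub in ("include", "lib"):
        if not os.path.isdir(os.path.join(H5dir, sub)):
            print "Missing %s/ under %s; HDF5LightReader will not build or link." % (sub, H5dir)
            sys.exit(1)
    print "HDF5 layout under %s looks sane." % H5dir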


