[Yt-svn] yt-commit r1140 - in trunk: . tests yt/lagos yt/raven

mturk at wrangler.dreamhost.com mturk at wrangler.dreamhost.com
Fri Jan 23 13:32:10 PST 2009


Author: mturk
Date: Fri Jan 23 13:32:09 2009
New Revision: 1140
URL: http://yt.spacepope.org/changeset/1140

Log:
Rolling in the grid-optimization branch, which will now ... perish.

Memory usage goes way down with this.

Furthermore, CoveringGrids now have *correct* d[xyz].

Grids have fewer properties.

mpi4py subversion trunk from google code is now required for parallel usage;
this also reduces memory requirements and speeds things up.


Modified:
   trunk/   (props changed)
   trunk/tests/test_lagos.py
   trunk/yt/lagos/BaseDataTypes.py
   trunk/yt/lagos/BaseGridType.py
   trunk/yt/lagos/EnzoFields.py
   trunk/yt/lagos/FieldInfoContainer.py
   trunk/yt/lagos/ParallelTools.py
   trunk/yt/lagos/UniversalFields.py
   trunk/yt/lagos/setup.py
   trunk/yt/raven/Callbacks.py

Modified: trunk/tests/test_lagos.py
==============================================================================
--- trunk/tests/test_lagos.py	(original)
+++ trunk/tests/test_lagos.py	Fri Jan 23 13:32:09 2009
@@ -216,6 +216,7 @@
         ps = cPickle.dumps(self.data)
         pf, obj = cPickle.loads(ps)
         self.assertEqual(obj["CellMassMsun"].sum(), self.data["CellMassMsun"].sum())
+        print "TEST PICKLE"
 
 for field_name in yt.lagos.FieldInfo:
     field = yt.lagos.FieldInfo[field_name]
@@ -299,7 +300,7 @@
             self.assertTrue(na.all(cube1["Density"] == cube2a["Density"]))
             self.assertTrue(na.all(cube1["Temperature"] == cube2b["Temperature"]))
     
-    def testFlushBack(self):
+    def testFlushBackToGrids(self):
         ml = self.hierarchy.max_level
         cg = self.hierarchy.covering_grid(3, [0.0]*3, [1.0]*3, [64,64,64])
         cg["Ones"] *= 2.0
@@ -307,8 +308,14 @@
         for g in na.concatenate([self.hierarchy.select_grids(i) for i in range(3)]):
             self.assertEqual(g["Ones"].max(), 2.0)
             self.assertEqual(g["Ones"][g["Ones"]*g.child_mask>0].min(), 2.0)
+
+    def testFlushBackToNewCover(self):
+        ml = self.hierarchy.max_level
+        cg = self.hierarchy.covering_grid(3, [0.0]*3, [1.0]*3, [64,64,64])
+        cg["tempContours"] = cg["Ones"] * 2.0
+        cg.flush_data(field="tempContours")
         cg2 = self.hierarchy.covering_grid(3, [0.0]*3, [1.0]*3, [64,64,64])
-        self.assertTrue(na.all(cg["Ones"] == cg2["Ones"]))
+        self.assertTrue(na.all(cg["tempContours"] == cg2["tempContours"]))
 
     def testRawFlushBack(self):
         ml = self.hierarchy.max_level
@@ -331,6 +338,10 @@
         self.assertTrue(cg["Density"].min() \
                      == self.hierarchy.grids[0]["Density"].min())
 
+    def testCellVolume(self):
+        cg = self.hierarchy.covering_grid(2, [0.0]*3, [1.0]*3, [64,64,64])
+        self.assertEqual(na.unique(cg["CellVolume"]).size, 1)
+
 class TestDiskDataType(Data3DBase, DataTypeTestingBase, LagosTestingBase, unittest.TestCase):
     def setUp(self):
         DataTypeTestingBase.setUp(self)

Modified: trunk/yt/lagos/BaseDataTypes.py
==============================================================================
--- trunk/yt/lagos/BaseDataTypes.py	(original)
+++ trunk/yt/lagos/BaseDataTypes.py	Fri Jan 23 13:32:09 2009
@@ -106,7 +106,7 @@
     """
     _grids = None
     _num_ghost_zones = 0
-    _con_args = []
+    _con_args = ()
 
     def __init__(self, pf, fields, **kwargs):
         """
@@ -375,7 +375,7 @@
 class AMROrthoRayBase(AMR1DData):
     _key_fields = ['x','y','z','dx','dy','dz']
     _type_name = "ortho_ray"
-    _con_args = ['axis', 'coords']
+    _con_args = ('axis', 'coords')
     def __init__(self, axis, coords, fields=None, pf=None, **kwargs):
         """
         Dimensionality is reduced to one, and an ordered list of points at an
@@ -421,7 +421,7 @@
 
 class AMRRayBase(AMR1DData):
     _type_name = "ray"
-    _con_args = ['start_point', 'end_point']
+    _con_args = ('start_point', 'end_point')
     def __init__(self, start_point, end_point, fields=None, pf=None, **kwargs):
         """
         We accept a start point and an end point and then get all the data
@@ -473,8 +473,7 @@
         mask = na.zeros(grid.ActiveDimensions, dtype='int')
         import RTIntegrator as RT
         RT.VoxelTraversal(mask, grid.LeftEdge, grid.RightEdge,
-                          na.array([grid.dx, grid.dy, grid.dz]),
-                          self.center, self.vec)
+                          grid.dds, self.center, self.vec)
         return mask
 
 class AMR2DData(AMRData, GridPropertiesMixin, ParallelAnalysisInterface):
@@ -617,7 +616,7 @@
 
     _top_node = "/Slices"
     _type_name = "slice"
-    _con_args = ['axis', 'coord']
+    _con_args = ('axis', 'coord')
     #@time_execution
     def __init__(self, axis, coord, fields = None, center=None, pf=None,
                  node_name = False, source = None, **kwargs):
@@ -706,7 +705,7 @@
     def _generate_grid_coords(self, grid):
         xaxis = x_dict[self.axis]
         yaxis = y_dict[self.axis]
-        ds, dx, dy = grid['dds'][self.axis], grid['dds'][xaxis], grid['dds'][yaxis]
+        ds, dx, dy = grid.dds[self.axis], grid.dds[xaxis], grid.dds[yaxis]
         wantedIndex = int(((self.coord-grid.LeftEdge[self.axis])/ds))
         sl = [slice(None), slice(None), slice(None)]
         sl[self.axis] = slice(wantedIndex, wantedIndex + 1)
@@ -733,7 +732,7 @@
     def _get_data_from_grid(self, grid, field):
         # So what's our index of slicing?  This is what we need to figure out
         # first, so we can deal with our data in the fastest way.
-        dx = grid['dds'][self.axis]
+        dx = grid.dds[self.axis]
         wantedIndex = int(((self.coord-grid.LeftEdge[self.axis])/dx))
         sl = [slice(None), slice(None), slice(None)]
         sl[self.axis] = slice(wantedIndex, wantedIndex + 1)
@@ -770,7 +769,7 @@
     _top_node = "/CuttingPlanes"
     _key_fields = AMR2DData._key_fields + ['pz','pdz']
     _type_name = "cutting"
-    _con_args = ['normal', 'center']
+    _con_args = ('normal', 'center')
     def __init__(self, normal, center, fields = None, node_name = None,
                  **kwargs):
         """
@@ -830,16 +829,16 @@
         # This is slow.  Suggestions for improvement would be great...
         ss = grid.ActiveDimensions
         D = na.ones(ss) * self._d
-        x = grid.LeftEdge[0] + grid.dx * \
+        x = grid.LeftEdge[0] + grid.dds[0] * \
                 (na.arange(grid.ActiveDimensions[0], dtype='float64')+0.5)
-        y = grid.LeftEdge[1] + grid.dy * \
+        y = grid.LeftEdge[1] + grid.dds[1] * \
                 (na.arange(grid.ActiveDimensions[1], dtype='float64')+0.5)
-        z = grid.LeftEdge[2] + grid.dz * \
+        z = grid.LeftEdge[2] + grid.dds[2] * \
                 (na.arange(grid.ActiveDimensions[2], dtype='float64')+0.5)
         D += (x * self._norm_vec[0]).reshape(ss[0],1,1)
         D += (y * self._norm_vec[1]).reshape(1,ss[1],1)
         D += (z * self._norm_vec[2]).reshape(1,1,ss[2])
-        diag_dist = na.sqrt(na.sum(grid['dds']**2.0))
+        diag_dist = na.sqrt(na.sum(grid.dds**2.0))
         cm = (na.abs(D) <= 0.5*diag_dist) # Boolean
         return cm
 
@@ -892,7 +891,7 @@
     _top_node = "/Projections"
     _key_fields = AMR2DData._key_fields + ['weight_field']
     _type_name = "proj"
-    _con_args = ['axis', 'field', 'weight_field']
+    _con_args = ('axis', 'field', 'weight_field')
     def __init__(self, axis, field, weight_field = None,
                  max_level = None, center = None, pf = None,
                  source=None, node_name = None, field_cuts = None, **kwargs):
@@ -1027,7 +1026,7 @@
             field_data *= convs[...,na.newaxis]
         mylog.info("Level %s done: %s final", \
                    level, coord_data.shape[1])
-        dx = grids_to_project[0]['dds'][self.axis] # this is our dl
+        dx = grids_to_project[0].dds[self.axis] # this is our dl
         return coord_data, dx, field_data
 
     def __combine_grids_on_level(self, level):
@@ -1071,7 +1070,7 @@
                     args += self.__retval_coords[grid2.id] + [self.__retval_fields[grid2.id]]
                     args += self.__retval_coords[grid1.id] + [self.__retval_fields[grid1.id]]
                     # Refinement factor, which is same in all directions
-                    args.append(int(grid2.dx / grid1.dx)) 
+                    args.append(int(grid2['dx'] / grid1['dx'])) 
                     args.append(na.ones(args[0].shape, dtype='int64'))
                     kk = PointCombine.CombineGrids(*args)
                     goodI = args[-1].astype('bool')
@@ -1219,7 +1218,6 @@
         self.center = center
         self.set_field_parameter("center",center)
         self.coords = None
-        self.dx = None
         self._grids = None
 
     def _generate_coords(self):
@@ -1243,7 +1241,7 @@
     @restore_grid_state
     def _generate_grid_coords(self, grid, field=None):
         pointI = self._get_point_indices(grid)
-        dx = na.ones(pointI[0].shape[0], 'float64') * grid.dx
+        dx = na.ones(pointI[0].shape[0], 'float64') * grid.dds[0]
         tr = na.array([grid['x'][pointI].ravel(), \
                 grid['y'][pointI].ravel(), \
                 grid['z'][pointI].ravel(), \
@@ -1418,7 +1416,7 @@
     for things like selection along a baryon field.
     """
     _type_name = "extracted_region"
-    _con_args = ['_base_region', '_indices']
+    _con_args = ('_base_region', '_indices')
     def __init__(self, base_region, indices, force_refresh=True, **kwargs):
         cen = base_region.get_field_parameter("center")
         AMR3DData.__init__(self, center=cen,
@@ -1526,7 +1524,7 @@
     We can define a cylinder (or disk) to act as a data object.
     """
     _type_name = "disk"
-    _con_args = ['center', '_norm_vec', '_radius', '_height']
+    _con_args = ('center', '_norm_vec', '_radius', '_height')
     def __init__(self, center, normal, radius, height, fields=None,
                  pf=None, **kwargs):
         """
@@ -1589,7 +1587,7 @@
     AMRRegions are rectangular prisms of data.
     """
     _type_name = "region"
-    _con_args = ['center', 'left_edge', 'right_edge']
+    _con_args = ('center', 'left_edge', 'right_edge')
     def __init__(self, center, left_edge, right_edge, fields = None,
                  pf = None, **kwargs):
         """
@@ -1628,7 +1626,7 @@
     AMRRegions are rectangular prisms of data.
     """
     _type_name = "periodic_region"
-    _con_args = ['center', 'left_edge', 'right_edge']
+    _con_args = ('center', 'left_edge', 'right_edge')
     def __init__(self, center, left_edge, right_edge, fields = None,
                  pf = None, **kwargs):
         """
@@ -1713,7 +1711,7 @@
     A sphere of points
     """
     _type_name = "sphere"
-    _con_args = ['center', 'radius']
+    _con_args = ('center', 'radius')
     def __init__(self, center, radius, fields = None, pf = None, **kwargs):
         """
         The most famous of all the data objects, we define it via a
@@ -1761,7 +1759,7 @@
     """
     _spatial = True
     _type_name = "covering_grid"
-    _con_args = ['level', 'left_edge', 'right_edge', 'ActiveDimensions']
+    _con_args = ('level', 'left_edge', 'right_edge', 'ActiveDimensions')
     def __init__(self, level, left_edge, right_edge, dims, fields = None,
                  pf = None, num_ghost_zones = 0, use_pbar = True, **kwargs):
         """
@@ -1774,11 +1772,12 @@
         self.right_edge = na.array(right_edge)
         self.level = level
         self.ActiveDimensions = na.array(dims)
-        self.dx, self.dy, self.dz = (self.right_edge-self.left_edge) \
-                                  / self.ActiveDimensions
-        self.data["dx"] = self.dx
-        self.data["dy"] = self.dy
-        self.data["dz"] = self.dz
+        dds = (self.right_edge-self.left_edge) \
+              / self.ActiveDimensions
+        self.dds = dds
+        self.data["dx"] = dds[0]
+        self.data["dy"] = dds[1]
+        self.data["dz"] = dds[2]
         self._num_ghost_zones = num_ghost_zones
         self._use_pbar = use_pbar
         self._refresh_data()
@@ -1802,37 +1801,52 @@
 
     def _refresh_data(self):
         AMR3DData._refresh_data(self)
-        self['dx'] = self.dx * na.ones(self.ActiveDimensions, dtype='float64')
-        self['dy'] = self.dy * na.ones(self.ActiveDimensions, dtype='float64')
-        self['dz'] = self.dz * na.ones(self.ActiveDimensions, dtype='float64')
+        self['dx'] = self.dds[0] * na.ones(self.ActiveDimensions, dtype='float64')
+        self['dy'] = self.dds[1] * na.ones(self.ActiveDimensions, dtype='float64')
+        self['dz'] = self.dds[2] * na.ones(self.ActiveDimensions, dtype='float64')
 
-    def get_data(self, field=None):
-        self._get_list_of_grids()
-        # We don't generate coordinates here.
-        if field == None:
-            _fields_to_get = self.fields
+    def get_data(self, fields=None):
+        if self._grids is None:
+            self._get_list_of_grids()
+        if fields is None:
+            fields = self.fields[:]
         else:
-            _fields_to_get = ensure_list(field)
-        fields_to_get = [f for f in _fields_to_get if f not in self.data]
-        if len(fields_to_get) == 0: return
-        for field in fields_to_get:
+            fields = ensure_list(fields)
+        obtain_fields = []
+        for field in fields:
+            if self.data.has_key(field): continue
+            if field not in self.hierarchy.field_list:
+                try:
+                    #print "Generating", field
+                    self._generate_field(field)
+                    continue
+                except NeedsOriginalGrid, ngt_exception:
+                    pass
+            obtain_fields.append(field)
             self[field] = na.zeros(self.ActiveDimensions, dtype='float64') -999
+        if len(obtain_fields) == 0: return
         mylog.debug("Getting fields %s from %s possible grids",
-                   fields_to_get, len(self._grids))
+                   obtain_fields, len(self._grids))
         if self._use_pbar: pbar = \
                 get_pbar('Searching grids for values ', len(self._grids))
-        field = fields_to_get[-1]
-        for i,grid in enumerate(self._grids):
+        for i, grid in enumerate(self._grids):
             if self._use_pbar: pbar.update(i)
-            self._get_data_from_grid(grid, fields_to_get)
-            if not na.any(self[field] == -999): break
+            self._get_data_from_grid(grid, obtain_fields)
+            if not na.any(self[obtain_fields[0]] == -999): break
         if self._use_pbar: pbar.finish()
-        if na.any(self[field] == -999):
+        if na.any(self[obtain_fields[0]] == -999):
             # and self.dx < self.hierarchy.grids[0].dx:
-            print "COVERING PROBLEM", na.where(self[field]==-999)[0].size
-            print na.where(self[fields_to_get[0]]==-999)
-            return
+            print "COVERING PROBLEM", na.where(self[obtain_fields[0]]==-999)[0].size
+            print na.where(self[obtain_fields[0]]==-999)
             raise KeyError
+            
+    def _generate_field(self, field):
+        if self.pf.field_info.has_key(field):
+            # First we check the validator; this might even raise!
+            self.pf.field_info[field].check_available(self)
+            self[field] = self.pf.field_info[field](self)
+        else: # Can't find the field, try as it might
+            raise exceptions.KeyError(field)
 
     def flush_data(self, field=None):
         """
@@ -1852,8 +1866,8 @@
     @restore_grid_state
     def _get_data_from_grid(self, grid, fields):
         ll = int(grid.Level == self.level)
-        g_dx = na.array([grid.dx, grid.dy, grid.dz])
-        c_dx = na.array([self.dx, self.dy, self.dz])
+        g_dx = grid.dds.ravel()
+        c_dx = self.dds.ravel()
         g_fields = [grid[field] for field in ensure_list(fields)]
         c_fields = [self[field] for field in ensure_list(fields)]
         PointCombine.DataCubeRefine(
@@ -1863,8 +1877,8 @@
 
     def _flush_data_to_grid(self, grid, fields):
         ll = int(grid.Level == self.level)
-        g_dx = na.array([grid.dx, grid.dy, grid.dz])
-        c_dx = na.array([self.dx, self.dy, self.dz])
+        g_dx = grid.dds.ravel()
+        c_dx = self.dds.ravel()
         g_fields = []
         for field in ensure_list(fields):
             if not grid.has_key(field): grid[field] = \
@@ -1876,6 +1890,14 @@
             self.left_edge, self.right_edge, c_dx, c_fields,
             ll, self.pf["DomainLeftEdge"], self.pf["DomainRightEdge"])
 
+    @property
+    def LeftEdge(self):
+        return self.left_edge
+
+    @property
+    def RightEdge(self):
+        return self.right_edge
+
 class AMRSmoothedCoveringGridBase(AMRCoveringGridBase):
     _type_name = "smoothed_covering_grid"
     def __init__(self, *args, **kwargs):
@@ -1886,21 +1908,23 @@
         kwargs['num_ghost_zones'] = 0
         AMRCoveringGridBase.__init__(self, *args, **kwargs)
         if na.any(self.left_edge == self.pf["DomainLeftEdge"]):
-            self.left_edge += self.dx
+            self.left_edge += self.dds
             self.ActiveDimensions -= 1
         if na.any(self.right_edge == self.pf["DomainRightEdge"]):
-            self.right_edge -= self.dx
+            self.right_edge -= self.dds
             self.ActiveDimensions -= 1
 
     def _get_list_of_grids(self):
-        if na.any(self.left_edge - self.dx < self.pf["DomainLeftEdge"]) or \
-           na.any(self.right_edge + self.dx > self.pf["DomainRightEdge"]):
+        if na.any(self.left_edge - self.dds < self.pf["DomainLeftEdge"]) or \
+           na.any(self.right_edge + self.dds > self.pf["DomainRightEdge"]):
             grids,ind = self.pf.hierarchy.get_periodic_box_grids(
-                            self.left_edge - self.dx, self.right_edge + self.dx)
+                            self.left_edge - self.dds,
+                            self.right_edge + self.dds)
             ind = slice(None)
         else:
             grids,ind = self.pf.hierarchy.get_box_grids(
-                            self.left_edge - self.dx, self.right_edge + self.dx)
+                            self.left_edge - self.dds,
+                            self.right_edge + self.dds)
         level_ind = na.where(self.pf.hierarchy.gridLevels.ravel()[ind] <= self.level)
         sort_ind = na.argsort(self.pf.h.gridLevels.ravel()[ind][level_ind])
         self._grids = self.pf.hierarchy.grids[ind][level_ind][(sort_ind,)]
@@ -1955,7 +1979,7 @@
             # How do we find out the root grid base dx?
             idims = na.array([3,3,3])
             dx = na.minimum((self.right_edge-self.left_edge)/(idims-2),
-                            self.pf.h.grids[0].dx)
+                            self.pf.h.grids[0]['dx'])
             idims = na.floor((self.right_edge-self.left_edge)/dx) + 2
             for ax in 'xyz': self['cd%s'%ax] = dx[0]
             self[field] = na.zeros(idims,dtype='float64')-999
@@ -1971,7 +1995,7 @@
     @restore_grid_state
     def _get_data_from_grid(self, grid, fields):
         fields = ensure_list(fields)
-        g_dx = na.array([grid.dx, grid.dy, grid.dz])
+        g_dx = grid.dds
         c_dx = na.array([self['cdx'],self['cdy'],self['cdz']])
         g_fields = [grid[field] for field in fields]
         c_fields = [self[field] for field in fields]

Modified: trunk/yt/lagos/BaseGridType.py
==============================================================================
--- trunk/yt/lagos/BaseGridType.py	(original)
+++ trunk/yt/lagos/BaseGridType.py	Fri Jan 23 13:32:09 2009
@@ -34,7 +34,7 @@
     _id_offset = 1
 
     _type_name = 'grid'
-    _con_args = ['id', 'filename']
+    _con_args = ('id', 'filename')
 
     def __init__(self, id, filename=None, hierarchy = None):
         self.data = {}
@@ -45,9 +45,6 @@
         if (id % 1e4) == 0: mylog.debug("Prepared grid %s", id)
         if hierarchy: self.hierarchy = weakref.proxy(hierarchy)
         if filename: self.set_filename(filename)
-        self.overlap_masks = [None, None, None]
-        self._overlap_grids = [None, None, None]
-        self._file_access_pooling = False
         self.pf = self.hierarchy.parameter_file # weakref already
 
     def _generate_field(self, field):
@@ -100,26 +97,14 @@
         # So first we figure out what the index is.  We don't assume
         # that dx=dy=dz , at least here.  We probably do elsewhere.
         id = self.id - self._id_offset
-        self.dx = self.hierarchy.gridDxs[id,0]
-        self.dy = self.hierarchy.gridDys[id,0]
-        self.dz = self.hierarchy.gridDzs[id,0]
-        self.data['dx'] = self.dx
-        self.data['dy'] = self.dy
-        self.data['dz'] = self.dz
-        self.data['dds'] = na.array([self.dx, self.dy, self.dz])
+        self.dds = na.array([self.hierarchy.gridDxs[id,0],
+                                     self.hierarchy.gridDys[id,0],
+                                     self.hierarchy.gridDzs[id,0]])
+        self.data['dx'], self.data['dy'], self.data['dz'] = self.dds
 
     @property
     def _corners(self):
-        return na.array([
-            [self.LeftEdge[0], self.LeftEdge[1], self.LeftEdge[2]],
-            [self.RightEdge[0], self.LeftEdge[1], self.LeftEdge[2]],
-            [self.RightEdge[0], self.RightEdge[1], self.LeftEdge[2]],
-            [self.RightEdge[0], self.RightEdge[1], self.RightEdge[2]],
-            [self.LeftEdge[0], self.RightEdge[1], self.RightEdge[2]],
-            [self.LeftEdge[0], self.LeftEdge[1], self.RightEdge[2]],
-            [self.RightEdge[0], self.LeftEdge[1], self.RightEdge[2]],
-            [self.LeftEdge[0], self.RightEdge[1], self.LeftEdge[2]]
-        ], dtype='float64')
+        return self.hierarchy.gridCorners[:,:,self.id - self._id_offset]
 
     def _generate_overlap_masks(self, axis, LE, RE):
         """
@@ -182,15 +167,14 @@
         # Note that to keep in line with Enzo, we have broken PEP-8
         h = self.hierarchy # cache it
         my_ind = self.id - self._id_offset
-        self.Dimensions = h.gridDimensions[my_ind]
-        self.StartIndices = h.gridStartIndices[my_ind]
-        self.EndIndices = h.gridEndIndices[my_ind]
+        self.ActiveDimensions = h.gridEndIndices[my_ind] \
+                              - h.gridStartIndices[my_ind] + 1
         self.LeftEdge = h.gridLeftEdge[my_ind]
         self.RightEdge = h.gridRightEdge[my_ind]
         self.Level = h.gridLevels[my_ind,0]
-        self.Time = h.gridTimes[my_ind,0]
+        # This might be needed for streaming formats
+        #self.Time = h.gridTimes[my_ind,0]
         self.NumberOfParticles = h.gridNumberOfParticles[my_ind,0]
-        self.ActiveDimensions = (self.EndIndices - self.StartIndices + 1)
         self.Children = h.gridTree[my_ind]
         pID = h.gridReverseTree[my_ind]
         if pID != None and pID != -1:
@@ -223,7 +207,7 @@
         """
         Returns center position of an *index*
         """
-        pos = (index + 0.5) * self.dx + self.LeftEdge
+        pos = (index + 0.5) * self.dds + self.LeftEdge
         return pos
 
     def clear_all(self):
@@ -295,10 +279,11 @@
 
     #@time_execution
     def __fill_child_mask(self, child, mask, tofill):
-        startIndex = na.maximum(0, na.rint((child.LeftEdge - self.LeftEdge)/self.dx))
-        endIndex = na.minimum(na.rint((child.RightEdge - self.LeftEdge)/self.dx),
+        startIndex = na.maximum(0, na.rint(
+                    (child.LeftEdge - self.LeftEdge)/self.dds))
+        endIndex = na.minimum(na.rint(
+                    (child.RightEdge - self.LeftEdge)/self.dds),
                               self.ActiveDimensions)
-                              #startIndex + self.ActiveDimensions)
         startIndex = na.maximum(0, startIndex)
         mask[startIndex[0]:endIndex[0],
              startIndex[1]:endIndex[1],
@@ -343,7 +328,7 @@
         #print "Generating coords"
         ind = na.indices(self.ActiveDimensions)
         LE = na.reshape(self.LeftEdge,(3,1,1,1))
-        self['x'], self['y'], self['z'] = (ind+0.5)*self.dx+LE
+        self['x'], self['y'], self['z'] = (ind+0.5)*self.dds+LE
 
     __child_mask = None
     __child_indices = None
@@ -357,8 +342,8 @@
                              smoothed=False):
         # We will attempt this by creating a datacube that is exactly bigger
         # than the grid by nZones*dx in each direction
-        new_left_edge = self.LeftEdge - n_zones * self.dx
-        new_right_edge = self.RightEdge + n_zones * self.dx
+        new_left_edge = self.LeftEdge - n_zones * self.dds
+        new_right_edge = self.RightEdge + n_zones * self.dds
         # Something different needs to be done for the root grid, though
         level = self.Level
         if all_levels:
@@ -405,7 +390,6 @@
         """
         #All of the field parameters will be passed to us as needed.
         AMRGridPatch.__init__(self, id, filename, hierarchy)
-        self._file_access_pooling = False
 
     def _guess_properties_from_parent(self):
         """
@@ -418,17 +402,16 @@
         rf = self.pf["RefineBy"]
         my_ind = self.id - self._id_offset
         le = self.LeftEdge
-        self.dx = self.Parent.dx/rf
-        self.dy = self.Parent.dy/rf
-        self.dz = self.Parent.dz/rf
-        ParentLeftIndex = na.rint((self.LeftEdge-self.Parent.LeftEdge)/self.Parent.dx)
+        self['dx'] = self.Parent['dx']/rf
+        self['dy'] = self.Parent['dy']/rf
+        self['dz'] = self.Parent['dz']/rf
+        ParentLeftIndex = na.rint((self.LeftEdge-self.Parent.LeftEdge)/self.Parent.dds)
         self.start_index = rf*(ParentLeftIndex + self.Parent.get_global_startindex()).astype('int64')
-        self.LeftEdge = self.Parent.LeftEdge + self.Parent.dx * ParentLeftIndex
-        self.RightEdge = self.LeftEdge + \
-                         self.ActiveDimensions*na.array([self.dx,self.dy,self.dz])
-        self.hierarchy.gridDxs[my_ind,0] = self.dx
-        self.hierarchy.gridDys[my_ind,0] = self.dy
-        self.hierarchy.gridDzs[my_ind,0] = self.dz
+        self.LeftEdge = self.Parent.LeftEdge + self.Parent.dds * ParentLeftIndex
+        self.RightEdge = self.LeftEdge + self.ActiveDimensions*self.dds
+        self.hierarchy.gridDxs[my_ind,0] = self['dx']
+        self.hierarchy.gridDys[my_ind,0] = self['dy']
+        self.hierarchy.gridDzs[my_ind,0] = self['dz']
         self.hierarchy.gridLeftEdge[my_ind,:] = self.LeftEdge
         self.hierarchy.gridRightEdge[my_ind,:] = self.RightEdge
         self.hierarchy.gridCorners[:,:,my_ind] = na.array([ # Unroll!
@@ -454,9 +437,9 @@
         if self.start_index != None:
             return self.start_index
         if self.Parent == None:
-            start_index = self.LeftEdge / na.array([self.dx, self.dy, self.dz])
+            start_index = self.LeftEdge / self.dds
             return na.rint(start_index).astype('int64').ravel()
-        pdx = na.array([self.Parent.dx, self.Parent.dy, self.Parent.dz]).ravel()
+        pdx = self.Parent.dds
         start_index = (self.Parent.get_global_startindex()) + \
                        na.rint((self.LeftEdge - self.Parent.LeftEdge)/pdx)
         self.start_index = (start_index*self.pf["RefineBy"]).astype('int64').ravel()
@@ -479,7 +462,6 @@
     _id_offset = 0
     def __init__(self, LeftEdge, RightEdge, index, level, filename, offset, dimensions,start,stop,paranoia=False):
         AMRGridPatch.__init__(self, index)
-        self._file_access_pooling = False
         self.filename = filename
         self._offset = offset
         self._paranoid = paranoia
@@ -494,7 +476,7 @@
         self.Level = level
 
     def get_global_startindex(self):
-        return self.start + na.rint(self.pf["DomainLeftEdge"]/self.dx)
+        return self.start + na.rint(self.pf["DomainLeftEdge"]/self.dds)
 
     def _prepare_grid(self):
         """

Modified: trunk/yt/lagos/EnzoFields.py
==============================================================================
--- trunk/yt/lagos/EnzoFields.py	(original)
+++ trunk/yt/lagos/EnzoFields.py	Fri Jan 23 13:32:09 2009
@@ -163,7 +163,7 @@
                             data["particle_position_y"],
                             data["particle_position_z"], 3,
                             data["particle_mass"],
-                            blank, data.LeftEdge, data.dx)
+                            blank, data.LeftEdge, data['dx'])
     return blank
 add_field("particle_density", function=_pdensity,
           validators=[ValidateSpatial(0)], convert_function=_convertDensity)

Modified: trunk/yt/lagos/FieldInfoContainer.py
==============================================================================
--- trunk/yt/lagos/FieldInfoContainer.py	(original)
+++ trunk/yt/lagos/FieldInfoContainer.py	Fri Jan 23 13:32:09 2009
@@ -95,18 +95,30 @@
     def __init__(self, ghost_zones = 0, fields=None):
         self.ghost_zones = ghost_zones
         self.fields = fields
+    def __str__(self):
+        return "(%s, %s)" % (self.ghost_zones, self.fields)
+
+class NeedsOriginalGrid(NeedsGridType):
+    def __init__(self):
+        self.ghost_zones = 0
 
 class NeedsDataField(ValidationException):
     def __init__(self, missing_fields):
         self.missing_fields = missing_fields
+    def __str__(self):
+        return "(%s)" % (self.missing_fields)
 
 class NeedsProperty(ValidationException):
     def __init__(self, missing_properties):
         self.missing_properties = missing_properties
+    def __str__(self):
+        return "(%s)" % (self.missing_properties)
 
 class NeedsParameter(ValidationException):
     def __init__(self, missing_parameters):
         self.missing_parameters = missing_parameters
+    def __str__(self):
+        return "(%s)" % (self.missing_parameters)
 
 class FieldDetector(defaultdict):
     Level = 1
@@ -116,7 +128,7 @@
         self.ActiveDimensions = [nd,nd,nd]
         self.LeftEdge = [0.0,0.0,0.0]
         self.RightEdge = [1.0,1.0,1.0]
-        self.dx = self.dy = self.dz = na.array([1.0])
+        self['dx'] = self['dy'] = self['dz'] = na.array([1.0])
         self.fields = []
         if pf is None:
             pf = defaultdict(lambda: 1)
@@ -279,6 +291,14 @@
         if isinstance(data, FieldDetector): return True
         if not data._spatial:
             raise NeedsGridType(self.ghost_zones,self.fields)
-        if self.ghost_zones == data._num_ghost_zones:
+        if self.ghost_zones <= data._num_ghost_zones:
             return True
         raise NeedsGridType(self.ghost_zones,self.fields)
+
+class ValidateGridType(FieldValidator):
+    def __init__(self):
+        FieldValidator.__init__(self)
+    def __call__(self, data):
+        # We need to make sure that it's an actual AMR grid
+        if data._type_name == 'grid': return True
+        raise NeedsOriginalGrid()

Modified: trunk/yt/lagos/ParallelTools.py
==============================================================================
--- trunk/yt/lagos/ParallelTools.py	(original)
+++ trunk/yt/lagos/ParallelTools.py	Fri Jan 23 13:32:09 2009
@@ -236,30 +236,23 @@
             mylog.debug("Joining %s (%s) on %s", key, type(data[key]),
                         MPI.COMM_WORLD.rank)
             if MPI.COMM_WORLD.rank == 0:
-                data[key] = na.concatenate([data[key]] +
-                 [MPI.COMM_WORLD.Recv(source=i, tag=0) for i in range(1, np)],
-                    axis=-1)
+                temp_data = []
+                for i in range(1,np):
+                    temp_data.append(_recv_array(source=i, tag=0))
+                data[key] = na.concatenate([data[key]] + temp_data, axis=-1)
             else:
-                MPI.COMM_WORLD.Send(data[key], dest=0, tag=0)
+                _send_array(data[key], dest=0, tag=0)
             self._barrier()
-            data[key] = MPI.COMM_WORLD.Bcast(data[key], root=0)
+            data[key] = _bcast_array(data[key])
         self._barrier()
         return data
 
     @parallel_passthrough
-    def __mpi_recvdict(self, data):
-        # First we receive, then we make a new dict.
-        for i in range(1,MPI.COMM_WORLD.size):
-            buf = MPI.COMM_WORLD.Recv(source=i, tag=0)
-            for j in buf: data[j] = na.concatenate([data[j],buf[j]], axis=-1)
-        return data
-
-    @parallel_passthrough
     def __mpi_recvlist(self, data):
         # First we receive, then we make a new list.
         data = ensure_list(data)
         for i in range(1,MPI.COMM_WORLD.size):
-            buf = ensure_list(MPI.COMM_WORLD.Recv(source=i, tag=0))
+            buf = ensure_list(MPI.COMM_WORLD.recv(source=i, tag=0))
             data += buf
         return na.array(data)
 
@@ -269,17 +262,17 @@
         if MPI.COMM_WORLD.rank == 0:
             data = self.__mpi_recvlist(data)
         else:
-            MPI.COMM_WORLD.Send(data, dest=0, tag=0)
+            MPI.COMM_WORLD.send(data, dest=0, tag=0)
         mylog.debug("Opening MPI Broadcast on %s", MPI.COMM_WORLD.rank)
-        data = MPI.COMM_WORLD.Bcast(data, root=0)
+        data = MPI.COMM_WORLD.bcast(data, root=0)
         self._barrier()
         return data
 
     @parallel_passthrough
-    def __mpi_recvarray(self, data):
+    def __mpi_recvarrays(self, data):
         # First we receive, then we make a new list.
         for i in range(1,MPI.COMM_WORLD.size):
-            buf = MPI.COMM_WORLD.Recv(source=i, tag=0)
+            buf = _recv_array(source=i, tag=0)
             data = na.concatenate([data, buf])
         return data
 
@@ -287,9 +280,9 @@
     def _mpi_catarray(self, data):
         self._barrier()
         if MPI.COMM_WORLD.rank == 0:
-            data = self.__mpi_recvarray(data)
+            data = self.__mpi_recvarrays(data)
         else:
-            MPI.COMM_WORLD.Send(data, dest=0, tag=0)
+            _send_array(data, dest=0, tag=0)
         mylog.debug("Opening MPI Broadcast on %s", MPI.COMM_WORLD.rank)
         data = MPI.COMM_WORLD.Bcast(data, root=0)
         self._barrier()
@@ -308,7 +301,9 @@
    @parallel_passthrough
    def _mpi_allsum(self, data):
        """Reduce `data` with MPI.SUM across all ranks; every rank
        receives the summed result (lower-case allreduce pickles)."""
        self._barrier()
        # We use old-school pickling here on the assumption the arrays are
        # relatively small ( < 1e7 elements )
        return MPI.COMM_WORLD.allreduce(data, op=MPI.SUM)
 
     def _mpi_info_dict(self, info):
         mylog.info("Parallel capable: %s", parallel_capable)
@@ -318,11 +313,11 @@
         if MPI.COMM_WORLD.rank == 0:
             data = {0:info}
             for i in range(1, MPI.COMM_WORLD.size):
-                data[i] = MPI.COMM_WORLD.Recv(source=i, tag=0)
+                data[i] = MPI.COMM_WORLD.recv(source=i, tag=0)
         else:
-            MPI.COMM_WORLD.Send(info, dest=0, tag=0)
+            MPI.COMM_WORLD.send(info, dest=0, tag=0)
         mylog.debug("Opening MPI Broadcast on %s", MPI.COMM_WORLD.rank)
-        data = MPI.COMM_WORLD.Bcast(data, root=0)
+        data = MPI.COMM_WORLD.bcast(data, root=0)
         self._barrier()
         return MPI.COMM_WORLD.rank, data
 
@@ -344,3 +339,36 @@
             return open(fn, "w")
         else:
             return cStringIO.StringIO()
+
__tocast = 'c'

def _send_array(arr, dest, tag = 0):
    """Point-to-point send of `arr` to rank `dest`.

    ndarrays are sent as a pickled (dtype-string, shape) header followed
    by the raw buffer as MPI.CHAR; any other object is announced with a
    (None, None) header and then pickled wholesale.
    """
    if not isinstance(arr, na.ndarray):
        # Null header tells _recv_array to expect a pickled object next.
        MPI.COMM_WORLD.send((None, None), dest=dest, tag=tag)
        MPI.COMM_WORLD.send(arr, dest=dest, tag=tag)
        return
    tmp = arr.view(__tocast) # Cast to CHAR
    # communicate type and shape
    MPI.COMM_WORLD.send((arr.dtype.str, arr.shape), dest=dest, tag=tag)
    # Send the char view itself: the original passed `arr` and left `tmp`
    # unused; both name the same bytes, but this matches the MPI.CHAR
    # datatype and mirrors the receive side in _recv_array.
    MPI.COMM_WORLD.Send([tmp, MPI.CHAR], dest=dest, tag=tag)
+
def _recv_array(source, tag = 0):
    """Receive an array (or arbitrary pickled object) produced by
    _send_array from rank `source`."""
    dtype_str, shape = MPI.COMM_WORLD.recv(source=source, tag=tag)
    if dtype_str is None and shape is None:
        # Null header: the sender pickled a non-array object.
        return MPI.COMM_WORLD.recv(source=source, tag=tag)
    arr = na.empty(shape, dtype=dtype_str)
    # Fill the array's buffer in place through a raw-char view.
    MPI.COMM_WORLD.Recv([arr.view(__tocast), MPI.CHAR], source=source, tag=tag)
    return arr
+
def _bcast_array(arr, root = 0):
    """Broadcast `arr` from `root` to all ranks; non-root ranks allocate
    a matching array from the broadcast (dtype, shape) header."""
    if MPI.COMM_WORLD.rank == root:
        MPI.COMM_WORLD.bcast((arr.dtype.str, arr.shape), root=root)
    else:
        dtype_str, shape = MPI.COMM_WORLD.bcast(None, root=root)
        arr = na.empty(shape, dtype=dtype_str)
    # Raw-buffer broadcast through a char view of the array.
    MPI.COMM_WORLD.Bcast([arr.view(__tocast), MPI.CHAR], root=root)
    return arr

Modified: trunk/yt/lagos/UniversalFields.py
==============================================================================
--- trunk/yt/lagos/UniversalFields.py	(original)
+++ trunk/yt/lagos/UniversalFields.py	Fri Jan 23 13:32:09 2009
@@ -97,13 +97,13 @@
 def _GridLevel(field, data):
     return na.ones(data["Density"].shape)*(data.Level)
 add_field("GridLevel", function=_GridLevel,
-          validators=[#ValidateProperty('Level'),
+          validators=[ValidateGridType(),
                       ValidateSpatial(0)])
 
 def _GridIndices(field, data):
     return na.ones(data["Density"].shape)*(data.id-data._id_offset)
 add_field("GridIndices", function=_GridIndices,
-          validators=[#ValidateProperty('id'),
+          validators=[ValidateGridType(),
                       ValidateSpatial(0)], take_log=False)
 
 def _OnesOverDx(field, data):
@@ -429,7 +429,8 @@
     return na.ones(data["Density"].shape)*-1
 add_field("Contours", validators=[ValidateSpatial(0)], take_log=False,
           display_field=False, function=_Contours)
-add_field("tempContours", function=_Contours, validators=[ValidateSpatial(0)],
+add_field("tempContours", function=_Contours,
+          validators=[ValidateSpatial(0), ValidateGridType()],
           take_log=False, display_field=False)
 
 def obtain_velocities(data):

Modified: trunk/yt/lagos/setup.py
==============================================================================

Modified: trunk/yt/raven/Callbacks.py
==============================================================================
--- trunk/yt/raven/Callbacks.py	(original)
+++ trunk/yt/raven/Callbacks.py	Fri Jan 23 13:32:09 2009
@@ -254,13 +254,16 @@
         px_index = lagos.x_dict[plot.data.axis]
         py_index = lagos.y_dict[plot.data.axis]
         dom = plot.data.pf["DomainRightEdge"] - plot.data.pf["DomainLeftEdge"]
-        for px_off, py_off in na.mgrid[-1:1:3j,-1:1:3j]:
-            GLE = plot.data.gridLeftEdge + px_off * dom[px_index]
-            GRE = plot.data.gridRightEdge + py_off * dom[py_index]
-            left_edge_px = na.maximum((GLE[:,px_index]-x0)*dx, xx0)
-            left_edge_py = na.maximum((GLE[:,py_index]-y0)*dy, yy0)
-            right_edge_px = na.minimum((GRE[:,px_index]-x0)*dx, xx1)
-            right_edge_py = na.minimum((GRE[:,py_index]-y0)*dy, yy1)
+        pxs, pys = na.mgrid[-1:1:3j,-1:1:3j]
+        GLE = plot.data.gridLeftEdge
+        GRE = plot.data.gridRightEdge
+        for px_off, py_off in zip(pxs.ravel(), pys.ravel()):
+            pxo = px_off * dom[px_index]
+            pyo = py_off * dom[py_index]
+            left_edge_px = (GLE[:,px_index]+pxo-x0)*dx
+            left_edge_py = (GLE[:,py_index]+pyo-y0)*dy
+            right_edge_px = (GRE[:,px_index]+pxo-x0)*dx
+            right_edge_py = (GRE[:,py_index]+pyo-y0)*dy
             verts = na.array(
                     [(left_edge_px, left_edge_px, right_edge_px, right_edge_px),
                      (left_edge_py, right_edge_py, right_edge_py, left_edge_py)])



More information about the yt-svn mailing list