[Yt-svn] commit/yt: 2 new changesets

Bitbucket commits-noreply at bitbucket.org
Thu Jun 2 10:53:08 PDT 2011


2 new changesets in yt:

http://bitbucket.org/yt_analysis/yt/changeset/9d358a473ed0/
changeset:   9d358a473ed0
branches:    
user:        MatthewTurk
date:        2011-06-02 19:44:29
summary:     Merging of quadtrees now works, and the binary reduction in the fake
reduce operation works.  quad_proj is now fully parallel and gives correct
answers when projecting Ones and Density.
affected #:  3 files (568 bytes)

--- a/yt/data_objects/data_containers.py	Thu Jun 02 11:13:59 2011 -0400
+++ b/yt/data_objects/data_containers.py	Thu Jun 02 13:44:29 2011 -0400
@@ -1517,8 +1517,8 @@
 
     def _initialize_source(self, source = None):
         if source is None:
-            check, source = self._partition_hierarchy_2d(self.axis)
-            self._check_region = check
+            source = self.pf.h.all_data()
+            self._check_region = False
             #self._okay_to_serialize = (not check)
         else:
             self._distributed = False
@@ -1565,19 +1565,22 @@
         # _project_level, then it would be more memory conservative
         if self.preload_style == 'all':
             print "Preloading %s grids and getting %s" % (
-                    len(self.source._grids), self._get_dependencies(fields))
-            self._preload(self.source._grids,
+                    len(self.source._get_grid_objs()),
+                    self._get_dependencies(fields))
+            self._preload([g for g in self._get_grid_objs()],
                           self._get_dependencies(fields), self.hierarchy.io)
         # By changing the remove-from-tree method to accumulate, we can avoid
         # having to do this by level, and instead do it by CPU file
         for level in range(0, self._max_level+1):
             if self.preload_style == 'level':
-                self._preload(self.source.select_grids(level),
+                self._preload([g for g in self._get_grid_objs()
+                                 if g.Level == level],
                               self._get_dependencies(fields), self.hierarchy.io)
             self._add_level_to_tree(tree, level, fields)
             mylog.debug("End of projecting level level %s, memory usage %0.3e", 
                         level, get_memory_usage()/1024.)
         # Note that this will briefly double RAM usage
+        tree = self.merge_quadtree_buffers(tree)
         coord_data, field_data, weight_data, dxs = [], [], [], []
         for level in range(0, self._max_level + 1):
             npos, nvals, nwvals = tree.get_all_from_level(level, False)
@@ -1591,7 +1594,6 @@
             else:
                 ds = 0.0
             dxs.append(na.ones(nvals.shape[0], dtype='float64') * ds)
-        #self._tree = tree
         coord_data = na.concatenate(coord_data, axis=0).transpose()
         field_data = na.concatenate(field_data, axis=0).transpose()
         weight_data = na.concatenate(weight_data, axis=0).transpose()
@@ -1609,7 +1611,6 @@
         data['pdy'] = data['pdx'] # generalization is out the window!
         data['fields'] = field_data
         # Now we run the finalizer, which is ignored if we don't need it
-        data = self._mpi_catdict(data)
         field_data = na.vsplit(data.pop('fields'), len(fields))
         for fi, field in enumerate(fields):
             self[field] = field_data[fi].ravel()
@@ -1654,7 +1655,8 @@
                     to_add, weight_proj[used_points].ravel())
 
     def _add_level_to_tree(self, tree, level, fields):
-        grids_to_project = self.source.select_grids(level)
+        grids_to_project = [g for g in self._get_grid_objs()
+                            if g.Level == level]
         if len(grids_to_project) == 0: return
         dls, convs = self._get_dls(grids_to_project[0], fields)
         zero_out = (level != self._max_level)
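
Taken together, these hunks change the projection flow: each processor
projects only the grids it owns, level by level, into a local quadtree, and a
single merge_quadtree_buffers call replaces the old _mpi_catdict gather of
flat arrays. A minimal sketch of that shape (Grid, add_to_tree, and merge are
stand-ins; only the loop structure comes from the diff):

class Grid(object):
    # Stand-in for yt's grid objects; only the Level attribute matters here.
    def __init__(self, level):
        self.Level = level

def project_in_parallel(local_grids, max_level, tree, add_to_tree, merge):
    for level in range(max_level + 1):
        # Per-processor selection, replacing source.select_grids(level):
        grids = [g for g in local_grids if g.Level == level]
        add_to_tree(tree, level, grids)
    # One collective tree merge instead of concatenating result arrays.
    return merge(tree)

# Toy run: count grids per level into a dict standing in for the tree.
def count_into_tree(tree, level, grids):
    tree[level] = len(grids)

print project_in_parallel([Grid(0), Grid(1), Grid(1)], 1, {},
                          count_into_tree, lambda t: t)   # {0: 1, 1: 2}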


--- a/yt/utilities/_amr_utils/QuadTree.pyx	Thu Jun 02 11:13:59 2011 -0400
+++ b/yt/utilities/_amr_utils/QuadTree.pyx	Thu Jun 02 13:44:29 2011 -0400
@@ -109,7 +109,7 @@
 
     def __cinit__(self, np.ndarray[np.int64_t, ndim=1] top_grid_dims,
                   int nvals):
-        self.merged = 0
+        self.merged = 1
         cdef int i, j
         cdef QuadTreeNode *node
         cdef np.int64_t pos[2]
@@ -277,7 +277,7 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     def get_all_from_level(self, int level, int count_only = 0):
-        cdef int i, j
+        cdef int i, j, vi
         cdef int total = 0
         vals = []
         for i in range(self.top_grid_dims[0]):
@@ -371,8 +371,9 @@
     # 4. If n1 has refinement and n2 does not, we add the value of n2 to n1.
     cdef int i, j
 
+    QTN_add_value(n1, n2.val, n2.weight_val)
     if n1.children[0][0] == n2.children[0][0] == NULL:
-        QTN_add_value(n1, n2.val, n2.weight_val)
+        pass
     elif n1.children[0][0] != NULL and n2.children[0][0] != NULL:
         for i in range(2):
             for j in range(2):
@@ -383,7 +384,7 @@
                 n1.children[i][j] = n2.children[i][j]
                 n2.children[i][j] = NULL
     elif n1.children[0][0] != NULL and n2.children[0][0] == NULL:
-        QTN_add_value(n1, n2.val, n2.weight_val)
+        pass
     else:
         raise RuntimeError
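
Hoisting QTN_add_value above the branch changes the merge rule: n1 now absorbs
n2's values at every node the walk visits, and the four cases only decide what
happens to the children (the code tests children[0][0] alone because
refinement always creates all four children together). A rough pure-Python
rendering, with scalar values standing in for the nvals-wide arrays:

class Node(object):
    def __init__(self, val=0.0, weight_val=0.0):
        self.val = val
        self.weight_val = weight_val
        self.children = [[None, None], [None, None]]

def merge_nodes(n1, n2):
    # Hoisted accumulation: n1 absorbs n2 at every visited node.
    n1.val += n2.val
    n1.weight_val += n2.weight_val
    refined1 = n1.children[0][0] is not None
    refined2 = n2.children[0][0] is not None
    if refined1 and refined2:
        for i in range(2):
            for j in range(2):
                merge_nodes(n1.children[i][j], n2.children[i][j])
    elif refined2:
        # Only n2 is refined: graft its subtree onto n1.
        for i in range(2):
            for j in range(2):
                n1.children[i][j] = n2.children[i][j]
                n2.children[i][j] = None
    # If only n1 is refined, or neither is, the accumulation above
    # already did everything required.

a, b = Node(1.0, 1.0), Node(2.0, 0.5)
merge_nodes(a, b)
print a.val, a.weight_val  # 3.0 1.5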
 


--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Thu Jun 02 11:13:59 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Thu Jun 02 13:44:29 2011 -0400
@@ -1255,6 +1255,29 @@
         if not obj._distributed: return True
         return (obj._owner == MPI.COMM_WORLD.rank)
 
+    def _send_quadtree(self, target, qt, tgd, args):
+        sizebuf = na.zeros(1, 'int64')
+        buf = qt.tobuffer()
+        sizebuf[0] = buf[0].size
+        MPI.COMM_WORLD.Send([sizebuf, MPI.LONG], dest=target)
+        MPI.COMM_WORLD.Send([buf[0], MPI.INT], dest=target)
+        MPI.COMM_WORLD.Send([buf[1], MPI.DOUBLE], dest=target)
+        MPI.COMM_WORLD.Send([buf[2], MPI.DOUBLE], dest=target)
+        
+    def _recv_quadtree(self, target, tgd, args):
+        sizebuf = na.zeros(1, 'int64')
+        MPI.COMM_WORLD.Recv(sizebuf, source=target)
+        buf = [na.empty((sizebuf[0],), 'int32'),
+               na.empty((sizebuf[0], args[2]),'float64'),
+               na.empty((sizebuf[0],),'float64')]
+        MPI.COMM_WORLD.Recv([buf[0], MPI.INT], source=target)
+        MPI.COMM_WORLD.Recv([buf[1], MPI.DOUBLE], source=target)
+        MPI.COMM_WORLD.Recv([buf[2], MPI.DOUBLE], source=target)
+        qt = QuadTree(tgd, args[2])
+        qt.frombuffer(*buf)
+        return qt
+
+    @parallel_passthrough
     def merge_quadtree_buffers(self, qt):
         # This is a modified version of pairwise reduction from Lisandro Dalcin,
         # in the reductions demo of mpi4py
@@ -1269,28 +1292,18 @@
 
         while mask < size:
             if (mask & rank) != 0:
-                buf = qt.tobuffer()
-                sizebuf[0] = buf[0].size
                 target = (rank & ~mask) % size
-                MPI.COMM_WORLD.Send([sizebuf, MPI.LONG], dest=target)
-                MPI.COMM_WORLD.Send([buf[0], MPI.INT], dest=target)
-                MPI.COMM_WORLD.Send([buf[1], MPI.DOUBLE], dest=target)
-                MPI.COMM_WORLD.Send([buf[2], MPI.DOUBLE], dest=target)
+                print "SENDING FROM %02i to %02i" % (rank, target)
+                self._send_quadtree(target, qt, tgd, args)
+                qt = self._recv_quadtree(target, tgd, args)
             else:
                 target = (rank | mask)
                 if target < size:
-                    MPI.COMM_WORLD.Recv(sizebuf, source=target)
-                    buf = [na.empty((sizebuf[0],), 'int32'),
-                           na.empty((sizebuf[0], args[2]),'float64'),
-                           na.empty((sizebuf[0],),'float64')]
-                    MPI.COMM_WORLD.Recv([buf[0], MPI.INT], source=target)
-                    MPI.COMM_WORLD.Recv([buf[1], MPI.DOUBLE], source=target)
-                    MPI.COMM_WORLD.Recv([buf[2], MPI.DOUBLE], source=target)
-                    qto = QuadTree(tgd, args[2])
-                    qto.frombuffer(*buf)
-                    del buf
+                    print "RECEIVING FROM %02i on %02i" % (target, rank)
+                    qto = self._recv_quadtree(target, tgd, args)
                     merge_quadtrees(qt, qto)
                     del qto
+                    self._send_quadtree(target, qt, tgd, args)
             mask <<= 1
 
         if rank == 0:
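
The mask loop above is the pairwise exchange from Lisandro Dalcin's mpi4py
reductions demo: at each bit, the rank with that bit set ships its tree to the
partner with the bit cleared, which merges the two and, in this version, sends
the combined tree back, so every rank converges on the full merge. Stripped of
the quadtree serialization, the pattern looks roughly like this (a sketch
only; 'value' stands in for the QuadTree, 'merge' for merge_quadtrees, and the
pickle-based send/recv for the four-message _send_quadtree/_recv_quadtree
protocol):

from mpi4py import MPI

def pairwise_allreduce(value, merge, comm=MPI.COMM_WORLD):
    rank, size = comm.rank, comm.size
    mask = 1
    while mask < size:
        if rank & mask:
            target = (rank & ~mask) % size
            comm.send(value, dest=target)         # cf. _send_quadtree
            value = comm.recv(source=target)      # the send-back
        else:
            target = rank | mask
            if target < size:
                other = comm.recv(source=target)  # cf. _recv_quadtree
                value = merge(value, other)       # cf. merge_quadtrees
                comm.send(value, dest=target)     # return the merged tree
        mask <<= 1
    return value  # with the send-back, every rank holds the full result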


http://bitbucket.org/yt_analysis/yt/changeset/55f465099b39/
changeset:   55f465099b39
branches:    
user:        MatthewTurk
date:        2011-06-02 19:50:11
summary:     Removing the send-back operation for now, as I don't think it's necessary.
affected #:  1 file (2 bytes)

--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Thu Jun 02 13:44:29 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Thu Jun 02 13:50:11 2011 -0400
@@ -1295,7 +1295,7 @@
                 target = (rank & ~mask) % size
                 print "SENDING FROM %02i to %02i" % (rank, target)
                 self._send_quadtree(target, qt, tgd, args)
-                qt = self._recv_quadtree(target, tgd, args)
+                #qt = self._recv_quadtree(target, tgd, args)
             else:
                 target = (rank | mask)
                 if target < size:
@@ -1303,7 +1303,7 @@
                     qto = self._recv_quadtree(target, tgd, args)
                     merge_quadtrees(qt, qto)
                     del qto
-                    self._send_quadtree(target, qt, tgd, args)
+                    #self._send_quadtree(target, qt, tgd, args)
             mask <<= 1
 
         if rank == 0:
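
Dropping the send-back turns the exchange into a one-way reduce: a rank that
has already shipped its tree keeps looping with stale data, but those extra
exchanges only ever pair stale ranks with each other, so rank 0 still
accumulates every contribution exactly once. The equivalent clean form lets
finished ranks drop out (again a generic sketch, not yt's code):

from mpi4py import MPI

def pairwise_reduce(value, merge, comm=MPI.COMM_WORLD):
    rank, size = comm.rank, comm.size
    mask = 1
    while mask < size:
        if rank & mask:
            # This rank's partial tree is now upstream; it can stop.
            comm.send(value, dest=(rank & ~mask) % size)
            return None
        target = rank | mask
        if target < size:
            value = merge(value, comm.recv(source=target))
        mask <<= 1
    return value  # only rank 0 reaches here with the full reduction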

Repository URL: https://bitbucket.org/yt_analysis/yt/
