[yt-svn] commit/yt: 8 new changesets

Bitbucket commits-noreply at bitbucket.org
Mon Jun 25 06:45:37 PDT 2012


8 new commits in yt:


https://bitbucket.org/yt_analysis/yt/changeset/1b80aea13413/
changeset:   1b80aea13413
branch:      yt
user:        MatthewTurk
date:        2012-05-02 21:18:53
summary:     First pass at a "quadtree extension" fix to allow the QT to scale.  This works
by replicating the entire quadtree structure on every node during the
projection process.
affected #:  4 files

diff -r 39cfea952be1f77e33e05b99c781aa3ecc7edc7c -r 1b80aea1341373b65d2d6e520d36a66c8438be76 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -1646,6 +1646,7 @@
         >>> qproj = pf.h.quad_proj(0, "Density")
         >>> print qproj["Density"]
         """
+        self._ngrids = 0
         AMR2DData.__init__(self, axis, field, pf, node_name = None, **kwargs)
         self.proj_style = style
         if style == "mip":
@@ -1760,7 +1761,15 @@
             merge_style = 1
         else:
             raise NotImplementedError
-        tree = self.comm.merge_quadtree_buffers(tree, merge_style=merge_style)
+        print self._ngrids
+        #tree = self.comm.merge_quadtree_buffers(tree, merge_style=merge_style)
+        buf = list(tree.tobuffer())
+        del tree
+        new_buf = []
+        for i in range(3):
+            new_buf.append(self.comm.mpi_allreduce(buf.pop(0), op='sum'))
+        tree = self._get_tree(len(fields))
+        tree.frombuffer(new_buf[0], new_buf[1], new_buf[2], merge_style)
         coord_data, field_data, weight_data, dxs = [], [], [], []
         for level in range(0, self._max_level + 1):
             npos, nvals, nwvals = tree.get_all_from_level(level, False)
@@ -1803,20 +1812,25 @@
 
     def _add_grid_to_tree(self, tree, grid, fields, zero_out, dls):
         # We build up the fields to add
-        if self._weight is None:
+        self._ngrids += 1
+        if fields is None:
+            field_list = ["Ones"]
+        else:
+            field_list = fields
+        if self._weight is None or fields is None:
             weight_data = na.ones(grid.ActiveDimensions, dtype='float64')
             if zero_out: weight_data[grid.child_indices] = 0
             masked_data = [fd.astype('float64') * weight_data
-                           for fd in self._get_data_from_grid(grid, fields)]
+                           for fd in self._get_data_from_grid(grid, field_list)]
             wdl = 1.0
         else:
-            fields_to_get = list(set(fields + [self._weight]))
+            fields_to_get = list(set(field_list + [self._weight]))
             field_data = dict(zip(
                 fields_to_get, self._get_data_from_grid(grid, fields_to_get)))
             weight_data = field_data[self._weight].copy().astype('float64')
             if zero_out: weight_data[grid.child_indices] = 0
             masked_data  = [field_data[field].copy().astype('float64') * weight_data
-                                for field in fields]
+                                for field in field_list]
             del field_data
             wdl = dls[-1]
         full_proj = [self.func(field, axis=self.axis) * dl
@@ -1835,21 +1849,33 @@
         ypoints = (yind + (start_index[y_dict[self.axis]])).astype('int64')
         to_add = na.array([d[used_points].ravel() for d in full_proj], order='F')
         tree.add_array_to_tree(grid.Level, xpoints, ypoints, 
-                    to_add, weight_proj[used_points].ravel())
+                    to_add, weight_proj[used_points].ravel(),
+                    skip = int(fields is None))
 
     def _add_level_to_tree(self, tree, level, fields):
         grids_to_project = [g for g in self._get_grid_objs()
                             if g.Level == level]
-        if len(grids_to_project) == 0: return
-        dls, convs = self._get_dls(grids_to_project[0], fields)
+        grids_to_initialize = [g for g in self._grids
+                                if (g.Level == level)
+                                and (g not in grids_to_project)]
         zero_out = (level != self._max_level)
-        pbar = get_pbar('Projecting  level % 2i / % 2i ' \
-                          % (level, self._max_level), len(grids_to_project))
-        for pi, grid in enumerate(grids_to_project):
-            self._add_grid_to_tree(tree, grid, fields, zero_out, dls)
-            pbar.update(pi)
-            grid.clear_data()
-        pbar.finish()
+        if len(grids_to_project) > 0:
+            dls, convs = self._get_dls(grids_to_project[0], fields)
+            pbar = get_pbar('Projecting  level % 2i / % 2i ' \
+                              % (level, self._max_level), len(grids_to_project))
+            for pi, grid in enumerate(grids_to_project):
+                self._add_grid_to_tree(tree, grid, fields, zero_out, dls)
+                pbar.update(pi)
+                grid.clear_data()
+            pbar.finish()
+        if len(grids_to_initialize) > 0:
+            pbar = get_pbar('Extending tree % 2i / % 2i' \
+                              % (level, self._max_level), len(grids_to_initialize))
+            for pi, grid in enumerate(grids_to_initialize):
+                self._add_grid_to_tree(tree, grid, None, zero_out, [1.0])
+                pbar.update(pi)
+                grid.clear_data()
+            pbar.finish()
         return
 
     def _get_points_in_region(self, grid):


diff -r 39cfea952be1f77e33e05b99c781aa3ecc7edc7c -r 1b80aea1341373b65d2d6e520d36a66c8438be76 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -144,7 +144,7 @@
             else:
                 self[field] = self.pf.field_info[field](self)
         else: # Can't find the field, try as it might
-            raise exceptions.KeyError, field
+            raise exceptions.KeyError(field)
 
     def has_key(self, key):
         return (key in self.field_data)


diff -r 39cfea952be1f77e33e05b99c781aa3ecc7edc7c -r 1b80aea1341373b65d2d6e520d36a66c8438be76 yt/utilities/_amr_utils/QuadTree.pyx
--- a/yt/utilities/_amr_utils/QuadTree.pyx
+++ b/yt/utilities/_amr_utils/QuadTree.pyx
@@ -250,7 +250,7 @@
     cdef void add_to_position(self,
                  int level, np.int64_t pos[2],
                  np.float64_t *val,
-                 np.float64_t weight_val):
+                 np.float64_t weight_val, skip = 0):
         cdef int i, j, L
         cdef QuadTreeNode *node
         node = self.find_on_root_level(pos, level)
@@ -264,6 +264,7 @@
             i = (pos[0] >= fac*(2*node.pos[0]+1))
             j = (pos[1] >= fac*(2*node.pos[1]+1))
             node = node.children[i][j]
+        if skip == 1: return
         self.combine(node, val, weight_val, self.nvals)
             
     @cython.cdivision(True)
@@ -275,14 +276,14 @@
         j = <np.int64_t> (pos[1] / self.po2[level])
         return self.root_nodes[i][j]
         
-    
     @cython.boundscheck(False)
     @cython.wraparound(False)
     def add_array_to_tree(self, int level,
             np.ndarray[np.int64_t, ndim=1] pxs,
             np.ndarray[np.int64_t, ndim=1] pys,
             np.ndarray[np.float64_t, ndim=2] pvals,
-            np.ndarray[np.float64_t, ndim=1] pweight_vals):
+            np.ndarray[np.float64_t, ndim=1] pweight_vals,
+            int skip = 0):
         cdef int np = pxs.shape[0]
         cdef int p
         cdef cnp.float64_t *vals
@@ -292,7 +293,7 @@
             vals = data + self.nvals*p
             pos[0] = pxs[p]
             pos[1] = pys[p]
-            self.add_to_position(level, pos, vals, pweight_vals[p])
+            self.add_to_position(level, pos, vals, pweight_vals[p], skip)
         return
 
     def add_grid_to_tree(self, int level,


diff -r 39cfea952be1f77e33e05b99c781aa3ecc7edc7c -r 1b80aea1341373b65d2d6e520d36a66c8438be76 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -671,6 +671,10 @@
 
         mask = 1
 
+        buf = qt.tobuffer()
+        print "PROC", rank, buf[0].shape, buf[1].shape, buf[2].shape
+        sys.exit()
+
         args = qt.get_args() # Will always be the same
         tgd = na.array([args[0], args[1]], dtype='int64')
         sizebuf = na.zeros(1, 'int64')



https://bitbucket.org/yt_analysis/yt/changeset/f152e114c976/
changeset:   f152e114c976
branch:      yt
user:        MatthewTurk
date:        2012-05-02 21:23:39
summary:     If we don't have to preload any fields, skip calling the io_handler's preload
function.  The preload function might, for instance, include some touching of
the filesystem we want to avoid.
affected #:  1 file

diff -r 1b80aea1341373b65d2d6e520d36a66c8438be76 -r f152e114c976bf9c7188b207f670ca686b067670 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -530,6 +530,7 @@
     def preload(self, grids, fields, io_handler):
         # This will preload if it detects we are parallel capable and
         # if so, we load *everything* that we need.  Use with some care.
+        if len(fields) == 0: return
         mylog.debug("Preloading %s from %s grids", fields, len(grids))
         if not self._distributed: return
         io_handler.preload(grids, fields)



https://bitbucket.org/yt_analysis/yt/changeset/d848f65f0610/
changeset:   d848f65f0610
branch:      yt
user:        MatthewTurk
date:        2012-05-02 21:34:12
summary:     Remove the _ngrids stuff, and switch to sets for figuring out the
grids_to_initialize list.
affected #:  1 file

diff -r f152e114c976bf9c7188b207f670ca686b067670 -r d848f65f0610807954968bf0b2ac8197ef73940d yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -1646,7 +1646,6 @@
         >>> qproj = pf.h.quad_proj(0, "Density")
         >>> print qproj["Density"]
         """
-        self._ngrids = 0
         AMR2DData.__init__(self, axis, field, pf, node_name = None, **kwargs)
         self.proj_style = style
         if style == "mip":
@@ -1761,7 +1760,6 @@
             merge_style = 1
         else:
             raise NotImplementedError
-        print self._ngrids
         #tree = self.comm.merge_quadtree_buffers(tree, merge_style=merge_style)
         buf = list(tree.tobuffer())
         del tree
@@ -1812,7 +1810,6 @@
 
     def _add_grid_to_tree(self, tree, grid, fields, zero_out, dls):
         # We build up the fields to add
-        self._ngrids += 1
         if fields is None:
             field_list = ["Ones"]
         else:
@@ -1855,9 +1852,9 @@
     def _add_level_to_tree(self, tree, level, fields):
         grids_to_project = [g for g in self._get_grid_objs()
                             if g.Level == level]
+        gids = set(g.id for g in grids_to_project)
         grids_to_initialize = [g for g in self._grids
-                                if (g.Level == level)
-                                and (g not in grids_to_project)]
+                                if (g.Level == level) and (g.id not in gids)]
         zero_out = (level != self._max_level)
         if len(grids_to_project) > 0:
             dls, convs = self._get_dls(grids_to_project[0], fields)



https://bitbucket.org/yt_analysis/yt/changeset/0747f9869b4e/
changeset:   0747f9869b4e
branch:      yt
user:        MatthewTurk
date:        2012-05-02 21:57:44
summary:     Remove a "where" that was being used incorrectly.  The quadtree is now
extended via a manual grid-based initialization, rather than by generating all
of the masks and related structures.  This can make the QuadTree considerably
larger over the course of the run, but the upshot is that the tree will be
identical on every node and newly added grids will be handled correctly.
affected #:  2 files

diff -r d848f65f0610807954968bf0b2ac8197ef73940d -r 0747f9869b4e1193f6fe2208cd2363d43f4bee9b yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -1810,24 +1810,20 @@
 
     def _add_grid_to_tree(self, tree, grid, fields, zero_out, dls):
         # We build up the fields to add
-        if fields is None:
-            field_list = ["Ones"]
-        else:
-            field_list = fields
         if self._weight is None or fields is None:
             weight_data = na.ones(grid.ActiveDimensions, dtype='float64')
             if zero_out: weight_data[grid.child_indices] = 0
             masked_data = [fd.astype('float64') * weight_data
-                           for fd in self._get_data_from_grid(grid, field_list)]
+                           for fd in self._get_data_from_grid(grid, fields)]
             wdl = 1.0
         else:
-            fields_to_get = list(set(field_list + [self._weight]))
+            fields_to_get = list(set(fields + [self._weight]))
             field_data = dict(zip(
                 fields_to_get, self._get_data_from_grid(grid, fields_to_get)))
             weight_data = field_data[self._weight].copy().astype('float64')
             if zero_out: weight_data[grid.child_indices] = 0
             masked_data  = [field_data[field].copy().astype('float64') * weight_data
-                                for field in field_list]
+                                for field in fields]
             del field_data
             wdl = dls[-1]
         full_proj = [self.func(field, axis=self.axis) * dl
@@ -1835,7 +1831,7 @@
         weight_proj = self.func(weight_data, axis=self.axis) * wdl
         if (self._check_region and not self.source._is_fully_enclosed(grid)) or self._field_cuts is not None:
             used_data = self._get_points_in_region(grid).astype('bool')
-            used_points = na.where(na.logical_or.reduce(used_data, self.axis))
+            used_points = na.logical_or.reduce(used_data, self.axis)
         else:
             used_data = na.array([1.0], dtype='bool')
             used_points = slice(None)
@@ -1846,16 +1842,29 @@
         ypoints = (yind + (start_index[y_dict[self.axis]])).astype('int64')
         to_add = na.array([d[used_points].ravel() for d in full_proj], order='F')
         tree.add_array_to_tree(grid.Level, xpoints, ypoints, 
-                    to_add, weight_proj[used_points].ravel(),
-                    skip = int(fields is None))
+                    to_add, weight_proj[used_points].ravel())
 
     def _add_level_to_tree(self, tree, level, fields):
         grids_to_project = [g for g in self._get_grid_objs()
                             if g.Level == level]
-        gids = set(g.id for g in grids_to_project)
-        grids_to_initialize = [g for g in self._grids
-                                if (g.Level == level) and (g.id not in gids)]
+        grids_to_initialize = [g for g in self._grids if (g.Level == level)]
         zero_out = (level != self._max_level)
+        if len(grids_to_initialize) == 0: return
+        pbar = get_pbar('Initializing tree % 2i / % 2i' \
+                          % (level, self._max_level), len(grids_to_initialize))
+        start_index = na.empty(2, dtype="int64")
+        dims = na.empty(2, dtype="int64")
+        xax = x_dict[self.axis]
+        yax = y_dict[self.axis]
+        for pi, grid in enumerate(grids_to_initialize):
+            dims[0] = grid.ActiveDimensions[xax]
+            dims[1] = grid.ActiveDimensions[yax]
+            ind = grid.get_global_startindex()
+            start_index[0] = ind[xax]
+            start_index[1] = ind[yax]
+            tree.initialize_grid(level, start_index, dims)
+            pbar.update(pi)
+        pbar.finish()
         if len(grids_to_project) > 0:
             dls, convs = self._get_dls(grids_to_project[0], fields)
             pbar = get_pbar('Projecting  level % 2i / % 2i ' \
@@ -1865,14 +1874,6 @@
                 pbar.update(pi)
                 grid.clear_data()
             pbar.finish()
-        if len(grids_to_initialize) > 0:
-            pbar = get_pbar('Extending tree % 2i / % 2i' \
-                              % (level, self._max_level), len(grids_to_initialize))
-            for pi, grid in enumerate(grids_to_initialize):
-                self._add_grid_to_tree(tree, grid, None, zero_out, [1.0])
-                pbar.update(pi)
-                grid.clear_data()
-            pbar.finish()
         return
 
     def _get_points_in_region(self, grid):


diff -r d848f65f0610807954968bf0b2ac8197ef73940d -r 0747f9869b4e1193f6fe2208cd2363d43f4bee9b yt/utilities/_amr_utils/QuadTree.pyx
--- a/yt/utilities/_amr_utils/QuadTree.pyx
+++ b/yt/utilities/_amr_utils/QuadTree.pyx
@@ -296,12 +296,19 @@
             self.add_to_position(level, pos, vals, pweight_vals[p], skip)
         return
 
-    def add_grid_to_tree(self, int level,
-                         np.ndarray[np.int64_t, ndim=1] start_index,
-                         np.ndarray[np.float64_t, ndim=2] pvals,
-                         np.ndarray[np.float64_t, ndim=2] wvals,
-                         np.ndarray[np.int32_t, ndim=2] cm):
-        pass
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    def initialize_grid(self, int level,
+                        cnp.ndarray[np.int64_t, ndim=1] start_index, 
+                        cnp.ndarray[np.int64_t, ndim=1] dims):
+        # We assume that start_index is indices 0 and 1 of ourself
+        cdef int i, j
+        cdef cnp.int64_t pos[2]
+        for i in range(dims[0]):
+            pos[0] = i + start_index[0]
+            for j in range(dims[1]):
+                pos[1] = j + start_index[1]
+                self.add_to_position(level, pos, NULL, 0.0, 1)
 
     @cython.boundscheck(False)
     @cython.wraparound(False)



https://bitbucket.org/yt_analysis/yt/changeset/3f39eb7bf468/
changeset:   3f39eb7bf468
branch:      yt
user:        MatthewTurk
date:        2012-05-03 00:09:28
summary:     Fix the way buffers are done; still needs to be fixed for the particular merge
style.
affected #:  1 file

diff -r 0747f9869b4e1193f6fe2208cd2363d43f4bee9b -r 3f39eb7bf468d51fea9deb1c0edfcce785a87a8b yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -1763,9 +1763,9 @@
         #tree = self.comm.merge_quadtree_buffers(tree, merge_style=merge_style)
         buf = list(tree.tobuffer())
         del tree
-        new_buf = []
-        for i in range(3):
-            new_buf.append(self.comm.mpi_allreduce(buf.pop(0), op='sum'))
+        new_buf = [buf.pop(0)]
+        new_buf.append(self.comm.mpi_allreduce(buf.pop(0), op='sum'))
+        new_buf.append(self.comm.mpi_allreduce(buf.pop(0), op='sum'))
         tree = self._get_tree(len(fields))
         tree.frombuffer(new_buf[0], new_buf[1], new_buf[2], merge_style)
         coord_data, field_data, weight_data, dxs = [], [], [], []



https://bitbucket.org/yt_analysis/yt/changeset/05928ba31854/
changeset:   05928ba31854
branch:      yt
user:        MatthewTurk
date:        2012-05-08 04:26:38
summary:     Change to use the correct operation based on the quad tree projection style.
affected #:  1 file

diff -r 3f39eb7bf468d51fea9deb1c0edfcce785a87a8b -r 05928ba3185474576eb800c632a88e7732127642 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -1756,16 +1756,18 @@
         # Note that this will briefly double RAM usage
         if self.proj_style == "mip":
             merge_style = -1
+            op = "max"
         elif self.proj_style == "integrate":
             merge_style = 1
+            op = "sum"
         else:
             raise NotImplementedError
         #tree = self.comm.merge_quadtree_buffers(tree, merge_style=merge_style)
         buf = list(tree.tobuffer())
         del tree
         new_buf = [buf.pop(0)]
-        new_buf.append(self.comm.mpi_allreduce(buf.pop(0), op='sum'))
-        new_buf.append(self.comm.mpi_allreduce(buf.pop(0), op='sum'))
+        new_buf.append(self.comm.mpi_allreduce(buf.pop(0), op=op))
+        new_buf.append(self.comm.mpi_allreduce(buf.pop(0), op=op))
         tree = self._get_tree(len(fields))
         tree.frombuffer(new_buf[0], new_buf[1], new_buf[2], merge_style)
         coord_data, field_data, weight_data, dxs = [], [], [], []



https://bitbucket.org/yt_analysis/yt/changeset/0548c9e539b8/
changeset:   0548c9e539b8
branch:      yt
user:        MatthewTurk
date:        2012-06-05 16:45:20
summary:     Turn off preloading in parallel for projections, as this kills large datasets.
affected #:  1 file

diff -r 05928ba3185474576eb800c632a88e7732127642 -r 0548c9e539b81d4f67921c50a379b03ecdf21e8d yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -1582,7 +1582,7 @@
     def __init__(self, axis, field, weight_field = None,
                  max_level = None, center = None, pf = None,
                  source=None, node_name = None, field_cuts = None,
-                 preload_style='level', serialize=True,
+                 preload_style=None, serialize=True,
                  style = "integrate", **kwargs):
         """
         This is a data object corresponding to a line integral through the
@@ -1630,9 +1630,9 @@
             region of a grid out.  They can be of the form "grid['Temperature']
             > 100" for instance.
         preload_style : string
-            Either 'level' (default) or 'all'.  Defines how grids are loaded --
-            either level by level, or all at once.  Only applicable during
-            parallel runs.
+            Either 'level', 'all', or None (default).  Defines how grids are
+            loaded -- either level by level, or all at once.  Only applicable
+            during parallel runs.
         serialize : bool, optional
             Whether we should store this projection in the .yt file or not.
         kwargs : dict of items



https://bitbucket.org/yt_analysis/yt/changeset/bcd33b464c7e/
changeset:   bcd33b464c7e
branch:      yt
user:        MatthewTurk
date:        2012-06-24 15:33:37
summary:     Merging from tip
affected #:  169 files
Diff too large to display.

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
it because the commit-notification service is enabled for the recipient
address of this email.



More information about the yt-svn mailing list