[yt-svn] commit/yt-3.0: 2 new changesets

Bitbucket commits-noreply at bitbucket.org
Mon Aug 13 12:47:47 PDT 2012


2 new commits in yt-3.0:


https://bitbucket.org/yt_analysis/yt-3.0/changeset/07480041f1d5/
changeset:   07480041f1d5
branch:      yt-3.0
user:        MatthewTurk
date:        2012-08-13 17:05:14
summary:     Fixing a few missing variables
affected #:  1 file

diff -r e6db01a719d6c88f4288f515af2d847a4f3d4d08 -r 07480041f1d5a7055eb47a53528eddca4d9b8aa5 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -151,7 +151,7 @@
                 cur = cur.next
             o = &cur.my_octs[oi - cur.offset]
             for i in range(8):
-                count[o.domain - 1] += mask[oi,((k*2)+j)*2+i]
+                count[o.domain - 1] += mask[oi,i]
         return count
 
 cdef class RAMSESOctreeContainer(OctreeContainer):
@@ -237,7 +237,6 @@
             if cur == NULL:
                 if curlevel != 0:
                     raise RuntimeError
-                assert(cont.n_assigned < cont.n)
                 cur = &cont.my_octs[cont.n_assigned]
                 cur.local_ind = cont.offset + cont.n_assigned
                 cur.parent = NULL
@@ -247,7 +246,6 @@
                 cont.n_assigned += 1
                 self.nocts += 1
                 self.root_mesh[ind[0]][ind[1]][ind[2]] = cur
-                assert(cur.level == curlevel)
             # Now we find the location we want
             # Note that RAMSES I think 1-indexes levels, but we don't.
             for level in range(curlevel):
@@ -265,8 +263,6 @@
                 # Check if it has not been allocated
                 next = cur.children[ind[0]][ind[1]][ind[2]]
                 if next == NULL:
-                    if cont.n_assigned >= cont.n:
-                        raise AssertionError
                     next = &cont.my_octs[cont.n_assigned]
                     cur.children[ind[0]][ind[1]][ind[2]] = next
                     next.local_ind = cont.offset + cont.n_assigned
@@ -276,7 +272,6 @@
                         next.pos[i] = ind[i] + (cur.pos[i] << 1)
                     next.level = level + 1
                     self.nocts += 1
-                    assert(next.level == curlevel)
                 cur = next
             cur.domain = curdom
             if local == 1:
@@ -300,7 +295,6 @@
         coords = np.empty((cell_count, 3), dtype="int64")
         for oi in range(cur.n):
             o = &cur.my_octs[oi]
-            if o.level != level: continue
             for i in range(2):
                 for j in range(2):
                     for k in range(2):
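
For reference, the first hunk above drops the nested (i, j, k) mask index in favor of the flat child index i: j and k were the undefined ("missing") variables, and i already runs over all eight children of an oct. A minimal sketch of the equivalence, assuming a hypothetical (n_octs, 8) mask array:

    import numpy as np

    mask = np.random.randint(0, 2, size=(10, 8))  # hypothetical (n_octs, 8) child mask
    oi = 3

    # With i, j, k each in range(2), ((k*2)+j)*2+i enumerates 0..7 exactly once,
    # so summing over the flat index i in range(8) gives the same count.
    nested = sum(mask[oi, ((k * 2) + j) * 2 + i]
                 for k in range(2) for j in range(2) for i in range(2))
    flat = sum(mask[oi, i] for i in range(8))
    assert nested == flat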



https://bitbucket.org/yt_analysis/yt-3.0/changeset/2d4f73ae6853/
changeset:   2d4f73ae6853
branch:      yt-3.0
user:        MatthewTurk
date:        2012-08-13 21:47:10
summary:     Rewriting projection, but removing a number of pieces of functionality. It now
functions correctly in serial.

The removed functionality should be added back once the data_containers have
been completely replaced, and it will also be supplied through classmethod
constructors. This way we can reduce the total number of arguments but retain
the overall functionality.
affected #:  5 files
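
The classmethod constructors mentioned in the summary are not part of this commit; the sketch below is a hypothetical illustration of the pattern, keeping __init__ small while optional behavior (a made-up field_cuts argument here) comes back through an alternate constructor:

    class Projection:
        # Core constructor keeps only the essential arguments.
        def __init__(self, field, axis, weight_field=None, data_source=None):
            self.field = field
            self.axis = axis
            self.weight_field = weight_field
            self.data_source = data_source

        @classmethod
        def with_field_cuts(cls, field, axis, field_cuts, **kwargs):
            # Hypothetical: reattach removed functionality without widening
            # the signature of __init__.
            obj = cls(field, axis, **kwargs)
            obj.field_cuts = field_cuts
            return obj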

diff -r 07480041f1d5a7055eb47a53528eddca4d9b8aa5 -r 2d4f73ae68539c01fce350f122aedb415c6bcf02 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -149,15 +149,13 @@
 
 
 class YTQuadTreeProjBase(YTSelectionContainer2D):
-    _top_node = "/Projections"
     _key_fields = YTSelectionContainer2D._key_fields + ['weight_field']
     _type_name = "proj"
     _con_args = ('axis', 'field', 'weight_field')
+    _container_fields = ('px', 'py', 'pdx', 'pdy')
     def __init__(self, field, axis, weight_field = None,
-                 max_level = None, center = None, pf = None,
-                 source=None, node_name = None, field_cuts = None,
-                 preload_style='level', serialize=True,
-                 style = "integrate", **kwargs):
+                 center = None, pf = None, data_source=None, 
+                 style = "integrate", field_parameters = None):
         """
         This is a data object corresponding to a line integral through the
         simulation domain.
@@ -220,7 +218,7 @@
         >>> qproj = pf.h.proj("Density", 0)
         >>> print qproj["Density"]
         """
-        YTSelectionContainer2D.__init__(self, axis, field, pf, node_name = None, **kwargs)
+        YTSelectionContainer2D.__init__(self, axis, pf, field_parameters)
         self.proj_style = style
         if style == "mip":
             self.func = na.max
@@ -231,21 +229,12 @@
         else:
             raise NotImplementedError(style)
         self.weight_field = weight_field
-        self._field_cuts = field_cuts
-        self.serialize = serialize
         self._set_center(center)
         if center is not None: self.set_field_parameter('center',center)
-        self._node_name = node_name
-        self._initialize_source(source)
-        self._grids = self.source._grids
-        if max_level == None:
-            max_level = self.hierarchy.max_level
-        if self.source is not None:
-            max_level = min(max_level, self.source.grid_levels.max())
-        self._max_level = max_level
-        self._weight = weight_field
-        self.preload_style = preload_style
-        self._refresh_data()
+        if data_source is None: data_source = self.pf.h.all_data()
+        self.data_source = data_source
+        self.weight_field = weight_field
+        self.get_data(field)
 
     @property
     def _mrep(self):
@@ -254,63 +243,38 @@
     def hub_upload(self):
         self._mrep.upload()
 
-    def _convert_field_name(self, field):
-        if field == "weight_field": return "weight_field_%s" % self._weight
-        if field in self._key_fields: return field
-        return "%s_%s" % (field, self._weight)
-
     def _get_tree(self, nvals):
         xd = self.pf.domain_dimensions[x_dict[self.axis]]
         yd = self.pf.domain_dimensions[y_dict[self.axis]]
         return QuadTree(na.array([xd,yd], dtype='int64'), nvals,
                         style = self.proj_style)
 
-    def _get_dls(self, grid, fields):
+    def _get_conv(self, fields):
         # Place holder for a time when maybe we will not be doing just
         # a single dx for every field.
-        dls = []
-        convs = []
-        for field in fields + [self._weight]:
-            if field is None: continue
-            dls.append(just_one(grid['d%s' % axis_names[self.axis]]))
-            convs.append(self.pf.units[self.pf.field_info[field].projection_conversion])
-        dls = na.array(dls)
-        convs = na.array(convs)
-        if self.proj_style == "mip":
-            dls[:] = 1.0
-            convs[:] = 1.0
-        return dls, convs
+        convs = na.empty(len(fields), dtype="float64")
+        fields = self._determine_fields(fields)
+        for i, field in enumerate(fields):
+            fi = self._get_field_info(*field)
+            convs[i] = (self.pf.units[fi.projection_conversion])
+        return convs
 
-    def get_data(self, fields):
-        fields = ensure_list(fields)
+    def get_data(self, fields = None):
+        fields = self._determine_fields(ensure_list(fields))
         # We need a new tree for every single set of fields we add
-        self._obtain_fields(fields, self._node_name)
-        fields = [f for f in fields if f not in self.field_data]
         if len(fields) == 0: return
+        chunk_fields = fields[:]
+        if self.weight_field is not None:
+            chunk_fields.append(self.weight_field)
         tree = self._get_tree(len(fields))
-        coord_data = []
-        field_data = []
-        dxs = []
-        # We do this here, but I am not convinced it should be done here
-        # It is probably faster, as it consolidates IO, but if we did it in
-        # _project_level, then it would be more memory conservative
-        if self.preload_style == 'all':
-            dependencies = self.get_dependencies(fields, ghost_zones = False)
-            mylog.debug("Preloading %s grids and getting %s",
-                            len(self.source._get_grid_objs()),
-                            dependencies)
-            self.comm.preload([g for g in self._get_grid_objs()],
-                          dependencies, self.hierarchy.io)
-        # By changing the remove-from-tree method to accumulate, we can avoid
-        # having to do this by level, and instead do it by CPU file
-        for level in range(0, self._max_level+1):
-            if self.preload_style == 'level':
-                self.comm.preload([g for g in self._get_grid_objs()
-                                 if g.Level == level],
-                              self.get_dependencies(fields), self.hierarchy.io)
-            self._add_level_to_tree(tree, level, fields)
-            mylog.debug("End of projecting level level %s, memory usage %0.3e",
-                        level, get_memory_usage()/1024.)
+        # We do this once
+        for chunk in self.data_source.chunks(None, "io"):
+            continue
+            self._initialize_chunk(chunk, tree)
+        # This needs to be parallel_objects-ified
+        for chunk in self.data_source.chunks(chunk_fields, "io"): 
+            mylog.debug("Adding chunk (%s) to tree", chunk.size)
+            self._handle_chunk(chunk, fields, tree)
         # Note that this will briefly double RAM usage
         if self.proj_style == "mip":
             merge_style = -1
@@ -318,145 +282,59 @@
             merge_style = 1
         else:
             raise NotImplementedError
-        buf = list(tree.tobuffer())
-        del tree
-        new_buf = [buf.pop(0)]
-        new_buf.append(self.comm.mpi_allreduce(buf.pop(0), op=op))
-        new_buf.append(self.comm.mpi_allreduce(buf.pop(0), op=op))
-        tree = self._get_tree(len(fields))
-        tree.frombuffer(new_buf[0], new_buf[1], new_buf[2], merge_style)
-        coord_data, field_data, weight_data, dxs = [], [], [], []
-        for level in range(0, self._max_level + 1):
-            npos, nvals, nwvals = tree.get_all_from_level(level, False)
-            coord_data.append(npos)
-            if self._weight is not None: nvals /= nwvals[:,None]
-            field_data.append(nvals)
-            weight_data.append(nwvals)
-            gs = self.source.select_grids(level)
-            if len(gs) > 0:
-                ds = gs[0].dds[0]
-            else:
-                ds = 0.0
-            dxs.append(na.ones(nvals.shape[0], dtype='float64') * ds)
-        coord_data = na.concatenate(coord_data, axis=0).transpose()
-        field_data = na.concatenate(field_data, axis=0).transpose()
-        if self._weight is None:
-            dls, convs = self._get_dls(self._grids[0], fields)
-            field_data *= convs[:,None]
-        weight_data = na.concatenate(weight_data, axis=0).transpose()
-        dxs = na.concatenate(dxs, axis=0).transpose()
+        # TODO: Add the combine operation
+        ox = self.pf.domain_left_edge[x_dict[self.axis]]
+        oy = self.pf.domain_left_edge[y_dict[self.axis]]
+        px, py, pdx, pdy, nvals, nwvals = tree.get_all(False)
+
+        na.multiply(px, self.pf.domain_width[x_dict[self.axis]], px)
+        na.add(px, ox, px)
+        na.multiply(pdx, self.pf.domain_width[x_dict[self.axis]], pdx)
+
+        na.multiply(py, self.pf.domain_width[y_dict[self.axis]], py)
+        na.add(py, oy, py)
+        na.multiply(pdy, self.pf.domain_width[y_dict[self.axis]], pdy)
+
+        if self.weight_field is not None:
+            na.divide(nvals, nwvals[:,None], nvals)
+        if self.weight_field is None:
+            convs = self._get_conv(fields)
+            nvals *= convs[None,:]
         # We now convert to half-widths and center-points
         data = {}
-        data['pdx'] = dxs
-        ox = self.pf.domain_left_edge[x_dict[self.axis]]
-        oy = self.pf.domain_left_edge[y_dict[self.axis]]
-        data['px'] = (coord_data[0,:]+0.5) * data['pdx'] + ox
-        data['py'] = (coord_data[1,:]+0.5) * data['pdx'] + oy
-        data['weight_field'] = weight_data
-        del coord_data
-        data['pdx'] *= 0.5
-        data['pdy'] = data['pdx'] # generalization is out the window!
-        data['fields'] = field_data
+        data['px'] = px
+        data['py'] = py
+        data['weight_field'] = nwvals
+        data['pdx'] = pdx
+        data['pdy'] = pdy
+        data['fields'] = nvals
         # Now we run the finalizer, which is ignored if we don't need it
         field_data = na.vsplit(data.pop('fields'), len(fields))
         for fi, field in enumerate(fields):
+            mylog.debug("Setting field %s", field)
             self[field] = field_data[fi].ravel()
-            if self.serialize: self._store_fields(field, self._node_name)
         for i in data.keys(): self[i] = data.pop(i)
         mylog.info("Projection completed")
 
-    def _add_grid_to_tree(self, tree, grid, fields, zero_out, dls):
-        # We build up the fields to add
-        if self._weight is None or fields is None:
-            weight_data = na.ones(grid.ActiveDimensions, dtype='float64')
-            if zero_out: weight_data[grid.child_indices] = 0
-            masked_data = [fd.astype('float64') * weight_data
-                           for fd in self._get_data_from_grid(grid, fields)]
-            wdl = 1.0
+    def _handle_chunk(self, chunk, fields, tree):
+        if self.proj_style == "mip":
+            dl = 1.0
         else:
-            fields_to_get = list(set(fields + [self._weight]))
-            field_data = dict(zip(
-                fields_to_get, self._get_data_from_grid(grid, fields_to_get)))
-            weight_data = field_data[self._weight].copy().astype('float64')
-            if zero_out: weight_data[grid.child_indices] = 0
-            masked_data  = [field_data[field].copy().astype('float64') * weight_data
-                                for field in fields]
-            del field_data
-            wdl = dls[-1]
-        full_proj = [self.func(field, axis=self.axis) * dl
-                     for field, dl in zip(masked_data, dls)]
-        weight_proj = self.func(weight_data, axis=self.axis) * wdl
-        if (self._check_region and not self.source._is_fully_enclosed(grid)) or self._field_cuts is not None:
-            used_data = self._get_points_in_region(grid).astype('bool')
-            used_points = na.logical_or.reduce(used_data, self.axis)
+            dl = chunk.fwidth[:, self.axis]
+        v = na.empty((chunk.size, len(fields)), dtype="float64")
+        for i in range(len(fields)):
+            v[:,i] = chunk[fields[i]] * dl
+        if self.weight_field is not None:
+            w = chunk[self.weight_field]
+            na.multiply(v, w[:,None], v)
+            na.multiply(w, dl, w)
         else:
-            used_data = na.array([1.0], dtype='bool')
-            used_points = slice(None)
-        xind, yind = [arr[used_points].ravel()
-                      for arr in na.indices(full_proj[0].shape)]
-        start_index = grid.get_global_startindex()
-        xpoints = (xind + (start_index[x_dict[self.axis]])).astype('int64')
-        ypoints = (yind + (start_index[y_dict[self.axis]])).astype('int64')
-        to_add = na.array([d[used_points].ravel() for d in full_proj], order='F')
-        tree.add_array_to_tree(grid.Level, xpoints, ypoints,
-                    to_add, weight_proj[used_points].ravel())
-
-    def _add_level_to_tree(self, tree, level, fields):
-        grids_to_project = [g for g in self._get_grid_objs()
-                            if g.Level == level]
-        grids_to_initialize = [g for g in self._grids if (g.Level == level)]
-        zero_out = (level != self._max_level)
-        pbar = get_pbar('Projecting  level % 2i / % 2i ' \
-                          % (level, self._max_level), len(grids_to_project))
-        if len(grids_to_initialize) == 0: return
-        pbar = get_pbar('Initializing tree % 2i / % 2i' \
-                          % (level, self._max_level), len(grids_to_initialize))
-        start_index = na.empty(2, dtype="int64")
-        dims = na.empty(2, dtype="int64")
-        xax = x_dict[self.axis]
-        yax = y_dict[self.axis]
-        for pi, grid in enumerate(grids_to_initialize):
-            dims[0] = grid.ActiveDimensions[xax]
-            dims[1] = grid.ActiveDimensions[yax]
-            ind = grid.get_global_startindex()
-            start_index[0] = ind[xax]
-            start_index[1] = ind[yax]
-            tree.initialize_grid(level, start_index, dims)
-            pbar.update(pi)
-            grid.clear_data()
-        pbar.finish()
-        if len(grids_to_project) > 0:
-            dls, convs = self._get_dls(grids_to_project[0], fields)
-            pbar = get_pbar('Projecting  level % 2i / % 2i ' \
-                              % (level, self._max_level), len(grids_to_project))
-            for pi, grid in enumerate(grids_to_project):
-                self._add_grid_to_tree(tree, grid, fields, zero_out, dls)
-                pbar.update(pi)
-                grid.clear_data()
-            pbar.finish()
-        return
-
-    def _get_points_in_region(self, grid):
-        pointI = self.source._get_point_indices(grid, use_child_mask=False)
-        point_mask = na.zeros(grid.ActiveDimensions)
-        point_mask[pointI] = 1.0
-        if self._field_cuts is not None:
-            for cut in self._field_cuts:
-                point_mask *= eval(cut)
-        return point_mask
-
-    def _get_data_from_grid(self, grid, fields):
-        fields = ensure_list(fields)
-        if self._check_region:
-            bad_points = self._get_points_in_region(grid)
-        else:
-            bad_points = 1.0
-        return [grid[field] * bad_points for field in fields]
-
-    def _gen_node_name(self):
-        return  "%s/%s" % \
-            (self._top_node, self.axis)
-
+            w = na.ones(chunk.size, dtype="float64")
+        icoords = chunk.icoords
+        i1 = icoords[:,0]
+        i2 = icoords[:,1]
+        ilevel = chunk.ires
+        tree.add_chunk_to_tree(i1, i2, ilevel, v, w)
 
 class YTOverlapProjBase(YTSelectionContainer2D):
     _top_node = "/Projections"


diff -r 07480041f1d5a7055eb47a53528eddca4d9b8aa5 -r 2d4f73ae68539c01fce350f122aedb415c6bcf02 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -367,7 +367,6 @@
                     raise YTFieldNotParseable(field)
                 ftype, fname = field
                 finfo = self._get_field_info(ftype, fname)
-                explicit_fields.append(field)
             else:
                 fname = field
                 finfo = self._get_field_info("unknown", fname)
@@ -444,7 +443,7 @@
             finfo = self._get_field_info(ftype, fname)
             if finfo.particle_type:
                 particles.append((ftype, fname))
-            else:
+            elif (ftype, fname) not in fluids:
                 fluids.append((ftype, fname))
         # The _read method will figure out which fields it needs to get from
         # disk, and return a dict of those fields along with the fields that


diff -r 07480041f1d5a7055eb47a53528eddca4d9b8aa5 -r 2d4f73ae68539c01fce350f122aedb415c6bcf02 yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -50,7 +50,7 @@
                 content = cStringIO.StringIO(f.read())
                 rv = subset.fill(content, fields)
                 for ft, f in fields:
-                    print "Filling %s with %s (%0.3e %0.3e) (%s:%s)" % (
+                    mylog.debug("Filling %s with %s (%0.3e %0.3e) (%s:%s)",
                         f, subset.cell_count, rv[f].min(), rv[f].max(),
                         cp, cp+subset.cell_count)
                     tr[(ft, f)][cp:cp+subset.cell_count] = rv.pop(f)


diff -r 07480041f1d5a7055eb47a53528eddca4d9b8aa5 -r 2d4f73ae68539c01fce350f122aedb415c6bcf02 yt/utilities/lib/QuadTree.pyx
--- a/yt/utilities/lib/QuadTree.pyx
+++ b/yt/utilities/lib/QuadTree.pyx
@@ -281,8 +281,7 @@
     
     @cython.boundscheck(False)
     @cython.wraparound(False)
-    def add_array_to_tree(self, int level,
-            np.ndarray[np.int64_t, ndim=1] pxs,
+    def add_array_to_tree(self, int level, np.ndarray[np.int64_t, ndim=1] pxs,
             np.ndarray[np.int64_t, ndim=1] pys,
             np.ndarray[np.float64_t, ndim=2] pvals,
             np.ndarray[np.float64_t, ndim=1] pweight_vals,
@@ -307,12 +306,12 @@
             np.ndarray[np.int64_t, ndim=1] level,
             np.ndarray[np.float64_t, ndim=2] pvals,
             np.ndarray[np.float64_t, ndim=1] pweight_vals):
-        cdef int np = pxs.shape[0]
+        cdef int ps = pxs.shape[0]
         cdef int p
         cdef cnp.float64_t *vals
         cdef cnp.float64_t *data = <cnp.float64_t *> pvals.data
         cdef cnp.int64_t pos[2]
-        for p in range(np):
+        for p in range(ps):
             vals = data + self.nvals*p
             pos[0] = pxs[p]
             pos[1] = pys[p]
@@ -337,23 +336,32 @@
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
-    def get_all_from_level(self, int level, int count_only = 0):
+    def get_all(self, int count_only = 0):
         cdef int i, j, vi
         cdef int total = 0
         vals = []
         for i in range(self.top_grid_dims[0]):
             for j in range(self.top_grid_dims[1]):
-                total += self.count_at_level(self.root_nodes[i][j], level, 0)
+                total += self.count(self.root_nodes[i][j])
         if count_only: return total
         # Allocate our array
-        cdef np.ndarray[np.int64_t, ndim=2] npos
+        cdef np.ndarray[np.float64_t, ndim=1] opx
+        cdef np.ndarray[np.float64_t, ndim=1] opy
+        cdef np.ndarray[np.float64_t, ndim=1] opdx
+        cdef np.ndarray[np.float64_t, ndim=1] opdy
         cdef np.ndarray[np.float64_t, ndim=2] nvals
         cdef np.ndarray[np.float64_t, ndim=1] nwvals
-        npos = np.zeros( (total, 2), dtype='int64')
+        opx = np.zeros(total, dtype='float64')
+        opy = np.zeros(total, dtype='float64')
+        opdx = np.zeros(total, dtype='float64')
+        opdy = np.zeros(total, dtype='float64')
         nvals = np.zeros( (total, self.nvals), dtype='float64')
         nwvals = np.zeros( total, dtype='float64')
         cdef np.int64_t curpos = 0
-        cdef np.int64_t *pdata = <np.int64_t *> npos.data
+        cdef np.float64_t *px = <np.float64_t *> opx.data
+        cdef np.float64_t *py = <np.float64_t *> opy.data
+        cdef np.float64_t *pdx = <np.float64_t *> opdx.data
+        cdef np.float64_t *pdy = <np.float64_t *> opdy.data
         cdef np.float64_t *vdata = <np.float64_t *> nvals.data
         cdef np.float64_t *wdata = <np.float64_t *> nwvals.data
         cdef np.float64_t wtoadd
@@ -363,37 +371,32 @@
             for j in range(self.top_grid_dims[1]):
                 for vi in range(self.nvals): vtoadd[vi] = 0.0
                 wtoadd = 0.0
-                curpos += self.fill_from_level(self.root_nodes[i][j],
-                    level, curpos, pdata, vdata, wdata, vtoadd, wtoadd, 0)
-        return npos, nvals, nwvals
+                curpos += self.fill(self.root_nodes[i][j],
+                    curpos, px, py, pdx, pdy, vdata, wdata, vtoadd, wtoadd, 0)
+        return opx, opy, opdx, opdy, nvals, nwvals
 
-    cdef int count_at_level(self, QuadTreeNode *node, int level, int cur_level):
+    cdef int count(self, QuadTreeNode *node):
         cdef int i, j
-        # We only really return a non-zero, calculated value if we are at the
-        # level in question.
-        if cur_level == level:
-            # We return 1 if there are no finer points at this level and zero
-            # if there are
-            return (node.children[0][0] == NULL)
-        if node.children[0][0] == NULL: return 0
         cdef int count = 0
+        if node.children[0][0] == NULL: return 1
         for i in range(2):
             for j in range(2):
-                count += self.count_at_level(node.children[i][j], level,
-                                             cur_level + 1)
+                count += self.count(node.children[i][j])
         return count
 
-    cdef int fill_from_level(self, QuadTreeNode *node, int level,
-                              np.int64_t curpos,
-                              np.int64_t *pdata,
-                              np.float64_t *vdata,
-                              np.float64_t *wdata,
-                              np.float64_t *vtoadd,
-                              np.float64_t wtoadd,
-                              int cur_level):
+    cdef int fill(self, QuadTreeNode *node, 
+                        np.int64_t curpos,
+                        np.float64_t *px,
+                        np.float64_t *py,
+                        np.float64_t *pdx,
+                        np.float64_t *pdy,
+                        np.float64_t *vdata,
+                        np.float64_t *wdata,
+                        np.float64_t *vtoadd,
+                        np.float64_t wtoadd,
+                        np.int64_t level):
         cdef int i, j, n
-        if cur_level == level:
-            if node.children[0][0] != NULL: return 0
+        if node.children[0][0] == NULL:
             if self.merged == -1:
                 for i in range(self.nvals):
                     vdata[self.nvals * curpos + i] = fmax(node.val[i], vtoadd[i])
@@ -402,10 +405,13 @@
                 for i in range(self.nvals):
                     vdata[self.nvals * curpos + i] = node.val[i] + vtoadd[i]
                 wdata[curpos] = node.weight_val + wtoadd
-            pdata[curpos * 2] = node.pos[0]
-            pdata[curpos * 2 + 1] = node.pos[1]
+            pdx[curpos] = 1.0 / (self.top_grid_dims[0]*2**level)
+            pdy[curpos] = 1.0 / (self.top_grid_dims[1]*2**level)
+            px[curpos] = (0.5 + node.pos[0]) * pdx[curpos]
+            py[curpos] = (0.5 + node.pos[1]) * pdy[curpos]
+            pdx[curpos] /= 2.0
+            pdy[curpos] /= 2.0
             return 1
-        if node.children[0][0] == NULL: return 0
         cdef np.int64_t added = 0
         if self.merged == 1:
             for i in range(self.nvals):
@@ -419,9 +425,9 @@
                 if self.merged == -1:
                     for n in range(self.nvals):
                         vtoadd[n] = node.val[n]
-                added += self.fill_from_level(node.children[i][j],
-                        level, curpos + added, pdata, vdata, wdata,
-                        vtoadd, wtoadd, cur_level + 1)
+                added += self.fill(node.children[i][j],
+                        curpos + added, px, py, pdx, pdy, vdata, wdata,
+                        vtoadd, wtoadd, level + 1)
         if self.merged == 1:
             for i in range(self.nvals):
                 vtoadd[i] -= node.val[i]
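
Taken together, fill() now reports leaf positions and cell widths in unitary [0, 1) coordinates, and get_data() in construction_data_containers.py rescales them into domain coordinates and half-widths. A small worked sketch of that arithmetic with hypothetical numbers:

    # A leaf at refinement level 2, x-position 5, on a 4-cell top grid,
    # in a domain spanning [0.0, 16.0) along this axis.
    top_grid_dims_x = 4
    level, pos_x = 2, 5
    domain_left, domain_width = 0.0, 16.0

    pdx = 1.0 / (top_grid_dims_x * 2**level)   # unitary cell width  = 0.0625
    px = (0.5 + pos_x) * pdx                   # unitary cell center = 0.34375
    pdx /= 2.0                                 # convert to half-width

    # get_data() then maps the unitary values into domain coordinates:
    px = px * domain_width + domain_left       # 5.5 (center of the 6th cell)
    pdx = pdx * domain_width                   # 0.5 (half of the 1.0 cell width)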


diff -r 07480041f1d5a7055eb47a53528eddca4d9b8aa5 -r 2d4f73ae68539c01fce350f122aedb415c6bcf02 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -879,7 +879,7 @@
         """
         axis = fix_axis(axis)
         (bounds,center) = GetBoundsAndCenter(axis,center,width,pf)
-        proj = pf.h.proj(fields, axis, weight_field=weight_field,max_level=max_level,center=center)
+        proj = pf.h.proj(fields, axis, weight_field=weight_field, center=center)
         PWViewerMPL.__init__(self,proj,bounds,origin=origin)
 
 class OffAxisSlicePlot(PWViewerMPL):

Repository URL: https://bitbucket.org/yt_analysis/yt-3.0/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


