[yt-svn] commit/yt: 14 new changesets

commits-noreply at bitbucket.org commits-noreply at bitbucket.org
Mon Jul 1 07:01:28 PDT 2013


14 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/4609f5c48f8a/
Changeset:   4609f5c48f8a
Branch:      yt
User:        samskillman
Date:        2013-06-04 19:43:51
Summary:     Initial go at cython amrkdtree. Works, but doesn't show a huge improvement.
Needs cleaning.
Affected #:  5 files

diff -r e155fde14e4e4735782c5a5afffe52b65c9ba2b1 -r 4609f5c48f8ad7b3efd0fbcf60eba4890f3d7a5f yt/utilities/amr_kdtree/amr_kdtools.py
--- a/yt/utilities/amr_kdtree/amr_kdtools.py
+++ b/yt/utilities/amr_kdtree/amr_kdtools.py
@@ -26,6 +26,7 @@
 import numpy as np
 from yt.funcs import *
 from yt.utilities.lib import kdtree_get_choices
+from yt.utilities.lib.amr_kdtools import kd_is_leaf
 
 def _lchild_id(node_id): return (node_id<<1)
 def _rchild_id(node_id): return (node_id<<1) + 1
@@ -309,59 +310,6 @@
     else:
         return kd_node_check(node.left)+kd_node_check(node.right)
 
-def kd_is_leaf(node):
-    has_l_child = node.left is None
-    has_r_child = node.right is None
-    assert has_l_child == has_r_child
-    return has_l_child
-
-def step_depth(current, previous):
-    '''
-    Takes a single step in the depth-first traversal
-    '''
-    if kd_is_leaf(current): # At a leaf, move back up
-        previous = current
-        current = current.parent
-
-    elif current.parent is previous: # Moving down, go left first
-        previous = current
-        if current.left is not None:
-            current = current.left
-        elif current.right is not None:
-            current = current.right
-        else:
-            current = current.parent
-
-    elif current.left is previous: # Moving up from left, go right 
-        previous = current
-        if current.right is not None:
-            current = current.right
-        else:
-            current = current.parent
-
-    elif current.right is previous: # Moving up from right child, move up
-        previous = current
-        current = current.parent
-
-    return current, previous
-
-def depth_traverse(tree, max_node=None):
-    '''
-    Yields a depth-first traversal of the kd tree always going to
-    the left child before the right.
-    '''
-    current = tree.trunk
-    previous = None
-    if max_node is None:
-        max_node = np.inf
-    while current is not None:
-        yield current
-        current, previous = step_depth(current, previous)
-        if current is None: break
-        if current.id >= max_node:
-            current = current.parent
-            previous = current.right
-
 def depth_first_touch(tree, max_node=None):
     '''
     Yields a depth-first traversal of the kd tree always going to
@@ -392,64 +340,64 @@
         current, previous = step_depth(current, previous)
 
 
-def viewpoint_traverse(tree, viewpoint):
-    '''
-    Yields a viewpoint dependent traversal of the kd-tree.  Starts
-    with nodes furthest away from viewpoint.
-    '''
-
-    current = tree.trunk
-    previous = None
-    while current is not None:
-        yield current
-        current, previous = step_viewpoint(current, previous, viewpoint)
-
-def step_viewpoint(current, previous, viewpoint):
-    '''
-    Takes a single step in the viewpoint based traversal.  Always
-    goes to the node furthest away from viewpoint first.
-    '''
-    if kd_is_leaf(current): # At a leaf, move back up
-        previous = current
-        current = current.parent
-    elif current.split.dim is None: # This is a dead node
-        previous = current
-        current = current.parent
-
-    elif current.parent is previous: # Moving down
-        previous = current
-        if viewpoint[current.split.dim] <= current.split.pos:
-            if current.right is not None:
-                current = current.right
-            else:
-                previous = current.right
-        else:
-            if current.left is not None:
-                current = current.left
-            else:
-                previous = current.left
-
-    elif current.right is previous: # Moving up from right 
-        previous = current
-        if viewpoint[current.split.dim] <= current.split.pos:
-            if current.left is not None:
-                current = current.left
-            else:
-                current = current.parent
-        else:
-            current = current.parent
-
-    elif current.left is previous: # Moving up from left child
-        previous = current
-        if viewpoint[current.split.dim] > current.split.pos:
-            if current.right is not None:
-                current = current.right
-            else:
-                current = current.parent
-        else:
-            current = current.parent
-
-    return current, previous
+# def viewpoint_traverse(tree, viewpoint):
+#     '''
+#     Yields a viewpoint dependent traversal of the kd-tree.  Starts
+#     with nodes furthest away from viewpoint.
+#     '''
+# 
+#     current = tree.trunk
+#     previous = None
+#     while current is not None:
+#         yield current
+#         current, previous = step_viewpoint(current, previous, viewpoint)
+# 
+# def step_viewpoint(current, previous, viewpoint):
+#     '''
+#     Takes a single step in the viewpoint based traversal.  Always
+#     goes to the node furthest away from viewpoint first.
+#     '''
+#     if kd_is_leaf(current): # At a leaf, move back up
+#         previous = current
+#         current = current.parent
+#     elif current.split.dim is None: # This is a dead node
+#         previous = current
+#         current = current.parent
+# 
+#     elif current.parent is previous: # Moving down
+#         previous = current
+#         if viewpoint[current.split.dim] <= current.split.pos:
+#             if current.right is not None:
+#                 current = current.right
+#             else:
+#                 previous = current.right
+#         else:
+#             if current.left is not None:
+#                 current = current.left
+#             else:
+#                 previous = current.left
+# 
+#     elif current.right is previous: # Moving up from right 
+#         previous = current
+#         if viewpoint[current.split.dim] <= current.split.pos:
+#             if current.left is not None:
+#                 current = current.left
+#             else:
+#                 current = current.parent
+#         else:
+#             current = current.parent
+# 
+#     elif current.left is previous: # Moving up from left child
+#         previous = current
+#         if viewpoint[current.split.dim] > current.split.pos:
+#             if current.right is not None:
+#                 current = current.right
+#             else:
+#                 current = current.parent
+#         else:
+#             current = current.parent
+# 
+#     return current, previous
 
 
 def receive_and_reduce(comm, incoming_rank, image, add_to_front):

diff -r e155fde14e4e4735782c5a5afffe52b65c9ba2b1 -r 4609f5c48f8ad7b3efd0fbcf60eba4890f3d7a5f yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -26,10 +26,12 @@
 from yt.funcs import *
 import numpy as np
 import h5py
-from amr_kdtools import Node, Split, kd_is_leaf, kd_sum_volume, kd_node_check, \
-        depth_traverse, viewpoint_traverse, add_grids, \
+from amr_kdtools import \
         receive_and_reduce, send_to_parent, scatter_image, find_node, \
-        depth_first_touch, add_grid
+        depth_first_touch
+from yt.utilities.lib.amr_kdtools import Node, add_grids, add_grid, \
+        kd_is_leaf, depth_traverse, viewpoint_traverse, kd_traverse, \
+        get_left_edge, get_right_edge, kd_sum_volume, kd_node_check
 from yt.utilities.parallel_tools.parallel_analysis_interface \
     import ParallelAnalysisInterface 
 from yt.utilities.lib.grid_traversal import PartitionedGrid
@@ -67,12 +69,11 @@
         self.comm_rank = comm_rank
         self.comm_size = comm_size
         self.trunk = Node(None, None, None,
-                left, right, None, 1)
+                left, right, -1, 1)
         if grids is None:
-            self.grids = pf.h.region((left+right)/2., left, right)._grids
-        else:
-            self.grids = grids
-        self.build(grids)
+            grids = pf.h.region((left+right)/2., left, right)._grids
+        self.grids = grids
+        self.build(self.grids)
 
     def add_grids(self, grids):
         lvl_range = range(self.min_level, self.max_level+1)
@@ -91,7 +92,8 @@
                     gles = np.array([g.LeftEdge for g in grids])[gmask]
                     gres = np.array([g.RightEdge for g in grids])[gmask]
                     gids = np.array([g.id for g in grids])[gmask]
-                    add_grids(self.trunk, gles, gres, gids, self.comm_rank,
+                    add_grids(self.trunk, gids.size, gles, gres, gids, 
+                              self.comm_rank,
                               self.comm_size)
                     grids_added += grids.size
                     del gles, gres, gids, grids
@@ -99,31 +101,35 @@
                     grids_added += grids.size
                     [add_grid(self.trunk, g.LeftEdge, g.RightEdge, g.id,
                               self.comm_rank, self.comm_size) for g in grids]
-        else:
-            gles = np.array([g.LeftEdge for g in grids])
-            gres = np.array([g.RightEdge for g in grids])
-            gids = np.array([g.id for g in grids])
+            return
 
-            add_grids(self.trunk, gles, gres, gids, self.comm_rank, self.comm_size)
-            del gles, gres, gids, grids
+        for lvl in lvl_range:
+            gles = np.array([g.LeftEdge for g in grids if g.Level == lvl])
+            gres = np.array([g.RightEdge for g in grids if g.Level == lvl])
+            gids = np.array([g.id for g in grids if g.Level == lvl])
 
+            add_grids(self.trunk, len(gids), gles, gres, gids, self.comm_rank, self.comm_size)
+            del gles, gres, gids
 
-    def build(self, grids = None):
+
+    def build(self, grids=None):
         self.add_grids(grids)
 
     def check_tree(self):
-        for node in depth_traverse(self):
-            if node.grid is None:
+        for node in depth_traverse(self.trunk):
+            if node.grid == -1:
                 continue
             grid = self.pf.h.grids[node.grid - self._id_offset]
             dds = grid.dds
             gle = grid.LeftEdge
             gre = grid.RightEdge
-            li = np.rint((node.left_edge-gle)/dds).astype('int32')
-            ri = np.rint((node.right_edge-gle)/dds).astype('int32')
+            nle = get_left_edge(node)
+            nre = get_right_edge(node)
+            li = np.rint((nle-gle)/dds).astype('int32')
+            ri = np.rint((nre-gle)/dds).astype('int32')
             dims = (ri - li).astype('int32')
-            assert(np.all(grid.LeftEdge <= node.left_edge))
-            assert(np.all(grid.RightEdge >= node.right_edge))
+            assert(np.all(grid.LeftEdge <= nle))
+            assert(np.all(grid.RightEdge >= nre))
             assert(np.all(dims > 0))
             # print grid, dims, li, ri
 
@@ -135,7 +141,7 @@
     def sum_cells(self, all_cells=False):
         cells = 0
         for node in depth_traverse(self):
-            if node.grid is None:
+            if node.grid != -1:
                 continue
             if not all_cells and not kd_is_leaf(node):
                 continue
@@ -204,14 +210,8 @@
         self._initialized = True
 
     def traverse(self, viewpoint=None):
-        if viewpoint is None:
-            for node in depth_traverse(self.tree):
-                if kd_is_leaf(node) and node.grid is not None:
-                    yield self.get_brick_data(node)
-        else:
-            for node in viewpoint_traverse(self.tree, viewpoint):
-                if kd_is_leaf(node) and node.grid is not None:
-                    yield self.get_brick_data(node)
+        for node in kd_traverse(self.tree.trunk):
+            yield self.get_brick_data(node)
 
     def get_node(self, nodeid):
         path = np.binary_repr(nodeid)
@@ -269,12 +269,13 @@
         grid = self.pf.h.grids[node.grid - self._id_offset]
         dds = grid.dds
         gle = grid.LeftEdge
-        gre = grid.RightEdge
-        li = np.rint((node.left_edge-gle)/dds).astype('int32')
-        ri = np.rint((node.right_edge-gle)/dds).astype('int32')
+        nle = get_left_edge(node)
+        nre = get_right_edge(node)
+        li = np.rint((nle-gle)/dds).astype('int32')
+        ri = np.rint((nre-gle)/dds).astype('int32')
         dims = (ri - li).astype('int32')
-        assert(np.all(grid.LeftEdge <= node.left_edge))
-        assert(np.all(grid.RightEdge >= node.right_edge))
+        assert(np.all(grid.LeftEdge <= nle))
+        assert(np.all(grid.RightEdge >= nre))
 
         if grid in self.current_saved_grids:
             dds = self.current_vcds[self.current_saved_grids.index(grid)]
@@ -292,8 +293,8 @@
                   li[2]:ri[2]+1].copy() for d in dds]
 
         brick = PartitionedGrid(grid.id, data,
-                                node.left_edge.copy(),
-                                node.right_edge.copy(),
+                                nle.copy(),
+                                nre.copy(),
                                 dims.astype('int64'))
         node.data = brick
         if not self._initialized: self.brick_dimensions.append(dims)
@@ -427,7 +428,7 @@
             f = h5py.File(fn,"a")
             for node in depth_traverse(self.tree):
                 i = node.id
-                if node.grid is not None:
+                if node.grid != -1:
                     data = [f["brick_%s_%s" %
                               (hex(i), field)][:].astype('float64') for field in self.fields]
                     node.data = PartitionedGrid(node.grid.id, data,

diff -r e155fde14e4e4735782c5a5afffe52b65c9ba2b1 -r 4609f5c48f8ad7b3efd0fbcf60eba4890f3d7a5f yt/utilities/lib/__init__.py
--- a/yt/utilities/lib/__init__.py
+++ b/yt/utilities/lib/__init__.py
@@ -40,3 +40,4 @@
 from .marching_cubes import *
 from .GridTree import *
 from .write_array import *
+from .amr_kdtools import *

diff -r e155fde14e4e4735782c5a5afffe52b65c9ba2b1 -r 4609f5c48f8ad7b3efd0fbcf60eba4890f3d7a5f yt/utilities/lib/amr_kdtools.pyx
--- /dev/null
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -0,0 +1,750 @@
+import numpy as np
+cimport numpy as np
+cimport cython
+from libc.stdlib cimport malloc, free, abs
+from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip, i64clip
+from field_interpolation_tables cimport \
+    FieldInterpolationTable, FIT_initialize_table, FIT_eval_transfer,\
+    FIT_eval_transfer_with_light
+from fixed_interpolator cimport *
+
+from cython.parallel import prange, parallel, threadid
+
+cdef extern from "stdlib.h":
+    # NOTE that size_t might not be int
+    void *alloca(int)
+
+
+DEF Nch = 4
+
+cdef struct Split:
+    int dim
+    np.float64_t pos
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef class Node:
+
+    cdef public Node left
+    cdef public Node right
+    cdef public Node parent
+    cdef public int grid
+    cdef public int node_id
+    cdef np.float64_t * left_edge
+    cdef np.float64_t * right_edge
+    cdef public data
+    cdef Split * split
+
+    def __cinit__(self, 
+                  Node parent, 
+                  Node left, 
+                  Node right, 
+                  np.ndarray[np.float64_t, ndim=1] left_edge,
+                  np.ndarray[np.float64_t, ndim=1] right_edge,
+                  int grid,
+                  int node_id):
+        self.left = left
+        self.right = right
+        self.parent = parent
+        cdef int i
+        self.left_edge = <np.float64_t *> malloc(sizeof(np.float64_t) * 3)
+        self.right_edge = <np.float64_t *> malloc(sizeof(np.float64_t) * 3)
+        for i in range(3):
+            self.left_edge[i] = left_edge[i]
+            self.right_edge[i] = right_edge[i]
+        self.grid = grid
+        self.node_id = node_id
+
+    def print_me(self):
+        print 'Node %i' % self.node_id
+        print '\t le: %e %e %e' % (self.left_edge[0], self.left_edge[1], 
+                                   self.left_edge[2])
+        print '\t re: %e %e %e' % (self.right_edge[0], self.right_edge[1], 
+                                   self.right_edge[2])
+        print '\t grid: %i' % self.grid
+
+
+def get_left_edge(Node node):
+    le = np.empty(3, dtype='float64')
+    for i in range(3):
+        le[i] = node.left_edge[i]
+    return le
+
+def get_right_edge(Node node):
+    re = np.empty(3, dtype='float64')
+    for i in range(3):
+        re[i] = node.right_edge[i]
+    return re
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def _lchild_id(int node_id):
+    return (node_id<<1)
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def _rchild_id(int node_id):
+    return (node_id<<1) + 1
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def _parent_id(int node_id):
+    return (node_id-1) >> 1
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef int should_i_build(Node node, int rank, int size):
+    return 1
+    # if (node.node_id < size) or (node.node_id >= 2*size):
+    #     return 1 
+    # elif node.node_id - size == rank:
+    #     return 1 
+    # else:
+    #     return 0 
+
+def kd_traverse(Node trunk, viewpoint=None):
+    if viewpoint is None:
+        for node in depth_traverse(trunk):
+            if kd_is_leaf(node) and node.grid != -1:
+                yield node
+    else:
+        for node in viewpoint_traverse(trunk, viewpoint):
+            if kd_is_leaf(node) and node.grid != -1:
+                yield node
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def add_grid(Node node, 
+                   np.ndarray[np.float64_t, ndim=1] gle, 
+                   np.ndarray[np.float64_t, ndim=1] gre, 
+                   int gid, 
+                   int rank,
+                   int size):
+
+    if not should_i_build(node, rank, size):
+        return
+
+    if kd_is_leaf(node):
+        insert_grid(node, gle, gre, gid, rank, size)
+    else:
+        less_id = gle[node.split.dim] < node.split.pos
+        if less_id:
+            add_grid(node.left, gle, gre,
+                     gid, rank, size)
+
+        greater_id = gre[node.split.dim] > node.split.pos
+        if greater_id:
+            add_grid(node.right, gle, gre,
+                     gid, rank, size)
+    return
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def insert_grid(Node node, 
+                      np.ndarray[np.float64_t, ndim=1] gle, 
+                      np.ndarray[np.float64_t, ndim=1] gre, 
+                      int grid_id, 
+                      int rank,
+                      int size):
+    if not should_i_build(node, rank, size):
+        return
+
+    # If we should continue to split based on parallelism, do so!
+    # if should_i_split(node, rank, size):
+    #     geo_split(node, gle, gre, grid_id, rank, size)
+    #     return
+    cdef int contained = 1
+    for i in range(3):
+        if gle[i] > node.left_edge[i] or\
+           gre[i] < node.right_edge[i]:
+            contained *= 0
+
+    if contained == 1:
+        node.grid = grid_id 
+        assert(node.grid != -1)
+        return
+
+    # Split the grid
+    cdef int check = split_grid(node, gle, gre, grid_id, rank, size)
+    # If check is -1, then we have found a place where there are no choices.
+    # Exit out and set the node to None.
+    if check == -1:
+        node.grid = -1 
+    return
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def add_grids(Node node, 
+                    int ngrids,
+                    np.ndarray[np.float64_t, ndim=2] gles, 
+                    np.ndarray[np.float64_t, ndim=2] gres, 
+                    np.ndarray[np.int64_t, ndim=1] gids, 
+                    int rank,
+                    int size):
+    cdef int i, nless, ngreater
+    if not should_i_build(node, rank, size):
+        return
+
+    if kd_is_leaf(node):
+        insert_grids(node, ngrids, gles, gres, gids, rank, size)
+        return
+
+    less_ids = gles[:,node.split.dim] < node.split.pos
+    greater_ids = gres[:,node.split.dim] > node.split.pos
+    nless = 0
+    ngreater = 0
+    for i in xrange(ngrids):
+        nless += less_ids[i]
+        ngreater += greater_ids[i]
+        
+    if nless > 0:
+        add_grids(node.left, nless, gles[less_ids], gres[less_ids],
+                  gids[less_ids], rank, size)
+
+    if ngreater > 0:
+        add_grids(node.right, ngreater, gles[greater_ids], gres[greater_ids],
+                  gids[greater_ids], rank, size)
+    return
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef int should_i_split(Node node, int rank, int size):
+    if node.node_id < size:
+        return 1
+    return 0
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef void insert_grids(Node node, 
+                       int ngrids,
+                       np.ndarray[np.float64_t, ndim=2] gles, 
+                       np.ndarray[np.float64_t, ndim=2] gres, 
+                       np.ndarray[np.int64_t, ndim=1] gids, 
+                       int rank,
+                       int size):
+    
+    if not should_i_build(node, rank, size) or ngrids == 0:
+        return
+    cdef int contained = 1
+    cdef int check
+
+    if ngrids == 1:
+        # If we should continue to split based on parallelism, do so!
+        #if should_i_split(node, rank, size):
+        #    geo_split(node, gles, gres, grid_ids, rank, size)
+        #    return
+
+        for i in range(3):
+            contained *= gles[0, i] <= node.left_edge[i]
+            contained *= gres[0, i] >= node.right_edge[i]
+    
+        if contained == 1:
+            # print 'Node fully contained, setting to grid: %i' % gids[0]
+            node.grid = gids[0]
+            assert(node.grid != -1)
+            return
+
+    # Split the grids
+    check = split_grids(node, ngrids, gles, gres, gids, rank, size)
+    # If check is -1, then we have found a place where there are no choices.
+    # Exit out and set the node to None.
+    if check == -1:
+        node.grid = -1
+    return
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def split_grid(Node node, 
+               np.ndarray[np.float64_t, ndim=1] gle, 
+               np.ndarray[np.float64_t, ndim=1] gre, 
+               int gid, 
+               int rank,
+               int size):
+    # Find a Split
+    data = np.empty((1, 2, 3), dtype='float64')
+    for i in range(3):
+        data[0, 0, i] = gle[i]
+        data[0, 1, i] = gre[i]
+        # print 'Single Data: ', gle[i], gre[i]
+
+    le = np.empty(3)
+    re = np.empty(3)
+    for i in range(3):
+        le[i] = node.left_edge[i]
+        re[i] = node.right_edge[i]
+
+    best_dim, split_pos, less_id, greater_id = \
+        kdtree_get_choices(1, data, le, re)
+
+    # If best_dim is -1, then we have found a place where there are no choices.
+    # Exit out and set the node to None.
+    if best_dim == -1:
+        print 'Failed to split grid.'
+        return -1
+
+        
+    split = <Split *> malloc(sizeof(Split))
+    split.dim = best_dim
+    split.pos = split_pos
+
+    #del data
+
+    # Create a Split
+    divide(node, split)
+
+    # Populate Left Node
+    #print 'Inserting left node', node.left_edge, node.right_edge
+    if less_id == 1:
+        insert_grid(node.left, gle, gre,
+                     gid, rank, size)
+
+    # Populate Right Node
+    #print 'Inserting right node', node.left_edge, node.right_edge
+    if greater_id == 1:
+        insert_grid(node.right, gle, gre,
+                     gid, rank, size)
+
+    return 0
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef kdtree_get_choices(int n_grids, 
+                        np.ndarray[np.float64_t, ndim=3] data,
+                        np.ndarray[np.float64_t, ndim=1] l_corner,
+                        np.ndarray[np.float64_t, ndim=1] r_corner):
+    cdef int i, j, k, dim, n_unique, best_dim, n_best, addit, my_split
+    cdef np.float64_t **uniquedims, *uniques, split
+    uniquedims = <np.float64_t **> alloca(3 * sizeof(np.float64_t*))
+    for i in range(3):
+        uniquedims[i] = <np.float64_t *> \
+                alloca(2*n_grids * sizeof(np.float64_t))
+    my_max = 0
+    my_split = 0
+    best_dim = -1
+    for dim in range(3):
+        n_unique = 0
+        uniques = uniquedims[dim]
+        for i in range(n_grids):
+            # Check for disqualification
+            for j in range(2):
+                # print "Checking against", i,j,dim,data[i,j,dim]
+                if not (l_corner[dim] < data[i, j, dim] and
+                        data[i, j, dim] < r_corner[dim]):
+                    # print "Skipping ", data[i,j,dim], l_corner[dim], r_corner[dim]
+                    continue
+                skipit = 0
+                # Add our left ...
+                for k in range(n_unique):
+                    if uniques[k] == data[i, j, dim]:
+                        skipit = 1
+                        # print "Identified", uniques[k], data[i,j,dim], n_unique
+                        break
+                if skipit == 0:
+                    uniques[n_unique] = data[i, j, dim]
+                    n_unique += 1
+        if n_unique > my_max:
+            best_dim = dim
+            my_max = n_unique
+            my_split = (n_unique-1)/2
+    # I recognize how lame this is.
+    cdef np.ndarray[np.float64_t, ndim=1] tarr = np.empty(my_max, dtype='float64')
+    for i in range(my_max):
+        # print "Setting tarr: ", i, uniquedims[best_dim][i]
+        tarr[i] = uniquedims[best_dim][i]
+    tarr.sort()
+    split = tarr[my_split]
+    cdef np.ndarray[np.uint8_t, ndim=1] less_ids = np.empty(n_grids, dtype='uint8')
+    cdef np.ndarray[np.uint8_t, ndim=1] greater_ids = np.empty(n_grids, dtype='uint8')
+    for i in range(n_grids):
+        if data[i, 0, best_dim] < split:
+            less_ids[i] = 1
+        else:
+            less_ids[i] = 0
+        if data[i, 1, best_dim] > split:
+            greater_ids[i] = 1
+        else:
+            greater_ids[i] = 0
+    # Return out unique values
+    return best_dim, split, less_ids.view('bool'), greater_ids.view('bool')
+
+#@cython.boundscheck(True)
+#@cython.wraparound(False)
+#@cython.cdivision(True)
+cdef int split_grids(Node node, 
+                       int ngrids,
+                       np.ndarray[np.float64_t, ndim=2] gles, 
+                       np.ndarray[np.float64_t, ndim=2] gres, 
+                       np.ndarray[np.int64_t, ndim=1] gids, 
+                       int rank,
+                       int size):
+    # Find a Split
+    cdef int i, j, k
+
+    le = get_left_edge(node)
+    re = get_right_edge(node)
+
+    data = np.array([(gles[i,:], gres[i,:]) for i in
+        xrange(ngrids)], copy=False)
+    best_dim, split_pos, less_ids, greater_ids = \
+        kdtree_get_choices(ngrids, data, le, re)
+ 
+    # If best_dim is -1, then we have found a place where there are no choices.
+    # Exit out and set the node to None.
+    if best_dim == -1:
+        print 'Failed to split grids.'
+        return -1
+
+    split = <Split *> malloc(sizeof(Split))
+    split.dim = best_dim
+    split.pos = split_pos
+
+    #del data
+
+    # Create a Split
+    divide(node, split)
+
+    nless = np.sum(less_ids)
+    ngreat = np.sum(greater_ids)
+    # Populate Left Node
+    #print 'Inserting left node', node.left_edge, node.right_edge
+    insert_grids(node.left, nless, gles[less_ids], gres[less_ids],
+                 gids[less_ids], rank, size)
+
+    # Populate Right Node
+    #print 'Inserting right node', node.left_edge, node.right_edge
+    insert_grids(node.right, ngreat, gles[greater_ids], gres[greater_ids],
+                 gids[greater_ids], rank, size)
+
+    return 0
+
+# def geo_split_grid(node, gle, gre, grid_id, rank, size):
+#     big_dim = np.argmax(gre-gle)
+#     new_pos = (gre[big_dim] + gle[big_dim])/2.
+#     old_gre = gre.copy()
+#     new_gle = gle.copy()
+#     new_gle[big_dim] = new_pos
+#     gre[big_dim] = new_pos
+# 
+#     split = Split(big_dim, new_pos)
+# 
+#     # Create a Split
+#     divide(node, split)
+# 
+#     # Populate Left Node
+#     #print 'Inserting left node', node.left_edge, node.right_edge
+#     insert_grid(node.left, gle, gre,
+#                 grid_id, rank, size)
+# 
+#     # Populate Right Node
+#     #print 'Inserting right node', node.left_edge, node.right_edge
+#     insert_grid(node.right, new_gle, old_gre,
+#                 grid_id, rank, size)
+#     return
+# 
+# 
+# def geo_split(node, gles, gres, grid_ids, rank, size):
+#     big_dim = np.argmax(gres[0]-gles[0])
+#     new_pos = (gres[0][big_dim] + gles[0][big_dim])/2.
+#     old_gre = gres[0].copy()
+#     new_gle = gles[0].copy()
+#     new_gle[big_dim] = new_pos
+#     gres[0][big_dim] = new_pos
+#     gles = np.append(gles, np.array([new_gle]), axis=0)
+#     gres = np.append(gres, np.array([old_gre]), axis=0)
+#     grid_ids = np.append(grid_ids, grid_ids, axis=0)
+# 
+#     split = Split(big_dim, new_pos)
+# 
+#     # Create a Split
+#     divide(node, split)
+# 
+#     # Populate Left Node
+#     #print 'Inserting left node', node.left_edge, node.right_edge
+#     insert_grids(node.left, gles[:1], gres[:1],
+#             grid_ids[:1], rank, size)
+# 
+#     # Populate Right Node
+#     #print 'Inserting right node', node.left_edge, node.right_edge
+#     insert_grids(node.right, gles[1:], gres[1:],
+#             grid_ids[1:], rank, size)
+#     return
+
+cdef new_right(Node node, Split * split):
+    new_right = Node.right_edge.copy()
+    new_right[split.dim] = split.pos
+    return new_right
+
+cdef new_left(Node node, Split * split):
+    new_left = Node.left_edge.copy()
+    new_left[split.dim] = split.pos
+    return new_left
+
+cdef void divide(Node node, Split * split):
+    # Create a Split
+    node.split = split
+    
+    lle = np.zeros(3, dtype='float64')
+    lre = np.zeros(3, dtype='float64')
+    rle = np.zeros(3, dtype='float64')
+    rre = np.zeros(3, dtype='float64')
+
+    cdef int i
+    for i in range(3):
+        lle[i] = node.left_edge[i]
+        lre[i] = node.right_edge[i]
+        rle[i] = node.left_edge[i]
+        rre[i] = node.right_edge[i]
+    lre[split.dim] = split.pos
+    rle[split.dim] = split.pos
+
+    node.left = Node(node, None, None,
+            lle.copy(), lre.copy(), node.grid,
+                     _lchild_id(node.node_id))
+
+    node.right = Node(node, None, None,
+            rle.copy(), rre.copy(), node.grid,
+                      _rchild_id(node.node_id))
+    
+    return
+# 
+def kd_sum_volume(Node node):
+    cdef np.float64_t vol = 1.0
+    if (node.left is None) and (node.right is None):
+        if node.grid == -1:
+            return 0.0
+        for i in range(3):
+            vol *= node.right_edge[i] - node.left_edge[i]
+        return vol 
+    else:
+        return kd_sum_volume(node.left) + kd_sum_volume(node.right)
+# 
+# def kd_sum_cells(node):
+#     if (node.left is None) and (node.right is None):
+#         if node.grid is None:
+#             return 0.0
+#         return np.prod(node.right_edge - node.left_edge)
+#     else:
+#         return kd_sum_volume(node.left) + kd_sum_volume(node.right)
+# 
+# 
+
+def kd_node_check(Node node):
+    assert (node.left is None) == (node.right is None)
+    if (node.left is None) and (node.right is None):
+        if node.grid != -1:
+            return np.prod(node.right_edge - node.left_edge)
+        else: return 0.0
+    else:
+        return kd_node_check(node.left)+kd_node_check(node.right)
+
+
+def kd_is_leaf(Node node):
+    cdef int has_l_child = node.left == None
+    cdef int has_r_child = node.right == None
+    assert has_l_child == has_r_child
+    return has_l_child
+
+def step_depth(Node current, Node previous):
+    '''
+    Takes a single step in the depth-first traversal
+    '''
+    if kd_is_leaf(current): # At a leaf, move back up
+        previous = current
+        current = current.parent
+
+    elif current.parent is previous: # Moving down, go left first
+        previous = current
+        if current.left is not None:
+            current = current.left
+        elif current.right is not None:
+            current = current.right
+        else:
+            current = current.parent
+
+    elif current.left is previous: # Moving up from left, go right 
+        previous = current
+        if current.right is not None:
+            current = current.right
+        else:
+            current = current.parent
+
+    elif current.right is previous: # Moving up from right child, move up
+        previous = current
+        current = current.parent
+
+    return current, previous
+ 
+def depth_traverse(Node trunk, max_node=None):
+    '''
+    Yields a depth-first traversal of the kd tree always going to
+    the left child before the right.
+    '''
+    current = trunk
+    previous = None
+    if max_node is None:
+        max_node = np.inf
+    while current is not None:
+        yield current
+        current, previous = step_depth(current, previous)
+        if current is None: break
+        if current.node_id >= max_node:
+            current = current.parent
+            previous = current.right
+# 
+# def depth_first_touch(tree, max_node=None):
+#     '''
+#     Yields a depth-first traversal of the kd tree always going to
+#     the left child before the right.
+#     '''
+#     current = tree.trunk
+#     previous = None
+#     if max_node is None:
+#         max_node = np.inf
+#     while current is not None:
+#         if previous is None or previous.parent != current:
+#             yield current
+#         current, previous = step_depth(current, previous)
+#         if current is None: break
+#         if current.id >= max_node:
+#             current = current.parent
+#             previous = current.right
+# 
+# def breadth_traverse(tree):
+#     '''
+#     Yields a breadth-first traversal of the kd tree always going to
+#     the left child before the right.
+#     '''
+#     current = tree.trunk
+#     previous = None
+#     while current is not None:
+#         yield current
+#         current, previous = step_depth(current, previous)
+# 
+# 
+def viewpoint_traverse(tree, viewpoint):
+    '''
+    Yields a viewpoint dependent traversal of the kd-tree.  Starts
+    with nodes furthest away from viewpoint.
+    '''
+
+    current = tree.trunk
+    previous = None
+    while current is not None:
+        yield current
+        current, previous = step_viewpoint(current, previous, viewpoint)
+
+def step_viewpoint(Node current, 
+                   Node previous, 
+                   viewpoint):
+    '''
+    Takes a single step in the viewpoint based traversal.  Always
+    goes to the node furthest away from viewpoint first.
+    '''
+    if kd_is_leaf(current): # At a leaf, move back up
+        previous = current
+        current = current.parent
+    elif current.split.dim is None: # This is a dead node
+        previous = current
+        current = current.parent
+
+    elif current.parent is previous: # Moving down
+        previous = current
+        if viewpoint[current.split.dim] <= current.split.pos:
+            if current.right is not None:
+                current = current.right
+            else:
+                previous = current.right
+        else:
+            if current.left is not None:
+                current = current.left
+            else:
+                previous = current.left
+
+    elif current.right is previous: # Moving up from right 
+        previous = current
+        if viewpoint[current.split.dim] <= current.split.pos:
+            if current.left is not None:
+                current = current.left
+            else:
+                current = current.parent
+        else:
+            current = current.parent
+
+    elif current.left is previous: # Moving up from left child
+        previous = current
+        if viewpoint[current.split.dim] > current.split.pos:
+            if current.right is not None:
+                current = current.right
+            else:
+                current = current.parent
+        else:
+            current = current.parent
+
+    return current, previous
+# 
+# 
+# def receive_and_reduce(comm, incoming_rank, image, add_to_front):
+#     mylog.debug( 'Receiving image from %04i' % incoming_rank)
+#     #mylog.debug( '%04i receiving image from %04i'%(self.comm.rank,back.owner))
+#     arr2 = comm.recv_array(incoming_rank, incoming_rank).reshape(
+#         (image.shape[0], image.shape[1], image.shape[2]))
+# 
+#     if add_to_front:
+#         front = arr2
+#         back = image
+#     else:
+#         front = image
+#         back = arr2
+# 
+#     if image.shape[2] == 3:
+#         # Assume Projection Camera, Add
+#         np.add(image, front, image)
+#         return image
+# 
+#     ta = 1.0 - front[:,:,3]
+#     np.maximum(ta, 0.0, ta)
+#     # This now does the following calculation, but in a memory
+#     # conservative fashion
+#     # image[:,:,i  ] = front[:,:,i] + ta*back[:,:,i]
+#     image = back.copy()
+#     for i in range(4):
+#         np.multiply(image[:,:,i], ta, image[:,:,i])
+#     np.add(image, front, image)
+#     return image
+# 
+# def send_to_parent(comm, outgoing_rank, image):
+#     mylog.debug( 'Sending image to %04i' % outgoing_rank)
+#     comm.send_array(image, outgoing_rank, tag=comm.rank)
+# 
+# def scatter_image(comm, root, image):
+#     mylog.debug( 'Scattering from %04i' % root)
+#     image = comm.mpi_bcast(image, root=root)
+#     return image
+# 
+# def find_node(node, pos):
+#     """
+#     Find the AMRKDTree node enclosing a position
+#     """
+#     assert(np.all(node.left_edge <= pos))
+#     assert(np.all(node.right_edge > pos))
+#     while not kd_is_leaf(node):
+#         if pos[node.split.dim] < node.split.pos:
+#             node = node.left
+#         else:
+#             node = node.right
+#     return node
+
+

diff -r e155fde14e4e4735782c5a5afffe52b65c9ba2b1 -r 4609f5c48f8ad7b3efd0fbcf60eba4890f3d7a5f yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -249,6 +249,10 @@
     config.add_extension("GridTree", 
     ["yt/utilities/lib/GridTree.pyx"],
         libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
+    config.add_extension("amr_kdtools", 
+                         ["yt/utilities/lib/amr_kdtools.pyx"],
+                         extra_compile_args=['-O3'],
+                         libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
 
     if os.environ.get("GPERFTOOLS", "no").upper() != "NO":
         gpd = os.environ["GPERFTOOLS"]


https://bitbucket.org/yt_analysis/yt/commits/aa34c033a081/
Changeset:   aa34c033a081
Branch:      yt
User:        samskillman
Date:        2013-06-05 19:29:22
Summary:     Okay, traversal and rendering now works. 2x speedup for now. Need to move everything
away from ndarrays next.
Affected #:  2 files

diff -r 4609f5c48f8ad7b3efd0fbcf60eba4890f3d7a5f -r aa34c033a0816641aa0249fc52e7c5df781004cd yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -29,7 +29,7 @@
 from amr_kdtools import \
         receive_and_reduce, send_to_parent, scatter_image, find_node, \
         depth_first_touch
-from yt.utilities.lib.amr_kdtools import Node, add_grids, add_grid, \
+from yt.utilities.lib.amr_kdtools import Node, add_grids, \
         kd_is_leaf, depth_traverse, viewpoint_traverse, kd_traverse, \
         get_left_edge, get_right_edge, kd_sum_volume, kd_node_check
 from yt.utilities.parallel_tools.parallel_analysis_interface \
@@ -210,7 +210,7 @@
         self._initialized = True
 
     def traverse(self, viewpoint=None):
-        for node in kd_traverse(self.tree.trunk):
+        for node in kd_traverse(self.tree.trunk, viewpoint=viewpoint):
             yield self.get_brick_data(node)
 
     def get_node(self, nodeid):

diff -r 4609f5c48f8ad7b3efd0fbcf60eba4890f3d7a5f -r aa34c033a0816641aa0249fc52e7c5df781004cd yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -118,18 +118,67 @@
                 yield node
 
 
- at cython.boundscheck(False)
- at cython.wraparound(False)
- at cython.cdivision(True)
-def add_grid(Node node, 
-                   np.ndarray[np.float64_t, ndim=1] gle, 
-                   np.ndarray[np.float64_t, ndim=1] gre, 
-                   int gid, 
-                   int rank,
-                   int size):
+# @cython.boundscheck(False)
+# @cython.wraparound(False)
+# @cython.cdivision(True)
+# def add_grid(Node node, 
+#                    np.ndarray[np.float64_t, ndim=1] gle, 
+#                    np.ndarray[np.float64_t, ndim=1] gre, 
+#                    int gid, 
+#                    int rank,
+#                    int size):
+# 
+#     if not should_i_build(node, rank, size):
+#         return
+# 
+#     if kd_is_leaf(node):
+#         insert_grid(node, gle, gre, gid, rank, size)
+#     else:
+#         less_id = gle[node.split.dim] < node.split.pos
+#         if less_id:
+#             add_grid(node.left, gle, gre,
+#                      gid, rank, size)
+# 
+#         greater_id = gre[node.split.dim] > node.split.pos
+#         if greater_id:
+#             add_grid(node.right, gle, gre,
+#                      gid, rank, size)
+#     return
 
-    if not should_i_build(node, rank, size):
-        return
+# @cython.boundscheck(False)
+# @cython.wraparound(False)
+# @cython.cdivision(True)
+# def insert_grid(Node node, 
+#                       np.ndarray[np.float64_t, ndim=1] gle, 
+#                       np.ndarray[np.float64_t, ndim=1] gre, 
+#                       int grid_id, 
+#                       int rank,
+#                       int size):
+#     if not should_i_build(node, rank, size):
+#         return
+# 
+#     # If we should continue to split based on parallelism, do so!
+#     # if should_i_split(node, rank, size):
+#     #     geo_split(node, gle, gre, grid_id, rank, size)
+#     #     return
+#     cdef int contained = 1
+#     for i in range(3):
+#         if gle[i] > node.left_edge[i] or\
+#            gre[i] < node.right_edge[i]:
+#             contained *= 0
+# 
+#     if contained == 1:
+#         node.grid = grid_id 
+#         assert(node.grid != -1)
+#         return
+# 
+#     # Split the grid
+#     cdef int check = split_grid(node, gle, gre, grid_id, rank, size)
+#     # If check is -1, then we have found a place where there are no choices.
+#     # Exit out and set the node to None.
+#     if check == -1:
+#         node.grid = -1 
+#     return
 
     if kd_is_leaf(node):
         insert_grid(node, gle, gre, gid, rank, size)
@@ -190,7 +239,8 @@
                     np.ndarray[np.int64_t, ndim=1] gids, 
                     int rank,
                     int size):
-    cdef int i, nless, ngreater
+    cdef int i, j, nless, ngreater
+    cdef np.int64_t gid
     if not should_i_build(node, rank, size):
         return
 
@@ -198,21 +248,53 @@
         insert_grids(node, ngrids, gles, gres, gids, rank, size)
         return
 
-    less_ids = gles[:,node.split.dim] < node.split.pos
-    greater_ids = gres[:,node.split.dim] > node.split.pos
+    cdef np.ndarray less_ids = np.zeros(ngrids, dtype='int64')
+    cdef np.ndarray greater_ids = np.zeros(ngrids, dtype='int64')
+   
     nless = 0
     ngreater = 0
-    for i in xrange(ngrids):
-        nless += less_ids[i]
-        ngreater += greater_ids[i]
-        
+    for i in range(ngrids):
+        if gles[i, node.split.dim] < node.split.pos:
+            less_ids[nless] = i
+            nless += 1
+            
+        if gres[i, node.split.dim] > node.split.pos:
+            greater_ids[ngreater] = i
+            ngreater += 1
+
+    #print 'nless: %i' % nless
+    #print 'ngreater: %i' % ngreater
+
+    cdef np.ndarray less_gles = np.zeros([nless, 3], dtype='float64')
+    cdef np.ndarray less_gres = np.zeros([nless, 3], dtype='float64')
+    cdef np.ndarray l_ids = np.zeros(nless, dtype='int64')
+
+    cdef np.ndarray greater_gles = np.zeros([ngreater, 3], dtype='float64')
+    cdef np.ndarray greater_gres = np.zeros([ngreater, 3], dtype='float64')
+    cdef np.ndarray g_ids = np.zeros(ngreater, dtype='int64')
+
+    cdef int index
+    for i in range(nless):
+        index = less_ids[i]
+        l_ids[i] = gids[index]
+        for j in range(3):
+            less_gles[i, j] = gles[index, j]
+            less_gres[i, j] = gres[index, j]
+
     if nless > 0:
-        add_grids(node.left, nless, gles[less_ids], gres[less_ids],
-                  gids[less_ids], rank, size)
+        add_grids(node.left, nless, less_gles, less_gres,
+                  l_ids, rank, size)
+            
+    for i in range(ngreater):
+        index = greater_ids[i]
+        g_ids[i] = gids[index]
+        for j in range(3):
+            greater_gles[i, j] = gles[index, j]
+            greater_gres[i, j] = gres[index, j]
 
     if ngreater > 0:
-        add_grids(node.right, ngreater, gles[greater_ids], gres[greater_ids],
-                  gids[greater_ids], rank, size)
+        add_grids(node.right, ngreater, greater_gles, greater_gres,
+                  g_ids, rank, size)
     return
 
 @cython.boundscheck(False)
@@ -263,68 +345,75 @@
         node.grid = -1
     return
 
- at cython.boundscheck(False)
- at cython.wraparound(False)
- at cython.cdivision(True)
-def split_grid(Node node, 
-               np.ndarray[np.float64_t, ndim=1] gle, 
-               np.ndarray[np.float64_t, ndim=1] gre, 
-               int gid, 
-               int rank,
-               int size):
-    # Find a Split
-    data = np.empty((1, 2, 3), dtype='float64')
-    for i in range(3):
-        data[0, 0, i] = gle[i]
-        data[0, 1, i] = gre[i]
-        # print 'Single Data: ', gle[i], gre[i]
-
-    le = np.empty(3)
-    re = np.empty(3)
-    for i in range(3):
-        le[i] = node.left_edge[i]
-        re[i] = node.right_edge[i]
-
-    best_dim, split_pos, less_id, greater_id = \
-        kdtree_get_choices(1, data, le, re)
-
-    # If best_dim is -1, then we have found a place where there are no choices.
-    # Exit out and set the node to None.
-    if best_dim == -1:
-        print 'Failed to split grid.'
-        return -1
-
-        
-    split = <Split *> malloc(sizeof(Split))
-    split.dim = best_dim
-    split.pos = split_pos
-
-    #del data
-
-    # Create a Split
-    divide(node, split)
-
-    # Populate Left Node
-    #print 'Inserting left node', node.left_edge, node.right_edge
-    if less_id == 1:
-        insert_grid(node.left, gle, gre,
-                     gid, rank, size)
-
-    # Populate Right Node
-    #print 'Inserting right node', node.left_edge, node.right_edge
-    if greater_id == 1:
-        insert_grid(node.right, gle, gre,
-                     gid, rank, size)
-
-    return 0
+# @cython.boundscheck(False)
+# @cython.wraparound(False)
+# @cython.cdivision(True)
+# def split_grid(Node node, 
+#                np.ndarray[np.float64_t, ndim=1] gle, 
+#                np.ndarray[np.float64_t, ndim=1] gre, 
+#                int gid, 
+#                int rank,
+#                int size):
+#     # Find a Split
+#     data = np.empty((1, 2, 3), dtype='float64')
+#     for i in range(3):
+#         data[0, 0, i] = gle[i]
+#         data[0, 1, i] = gre[i]
+#         # print 'Single Data: ', gle[i], gre[i]
+# 
+#     le = np.empty(3)
+#     re = np.empty(3)
+#     for i in range(3):
+#         le[i] = node.left_edge[i]
+#         re[i] = node.right_edge[i]
+# 
+#     best_dim, split_pos, less_id, greater_id = \
+#         kdtree_get_choices(1, data, le, re)
+# 
+#     # If best_dim is -1, then we have found a place where there are no choices.
+#     # Exit out and set the node to None.
+#     if best_dim == -1:
+#         print 'Failed to split grid.'
+#         return -1
+# 
+#         
+#     split = <Split *> malloc(sizeof(Split))
+#     split.dim = best_dim
+#     split.pos = split_pos
+# 
+#     #del data
+# 
+#     # Create a Split
+#     divide(node, split)
+# 
+#     # Populate Left Node
+#     #print 'Inserting left node', node.left_edge, node.right_edge
+#     if less_id == 1:
+#         insert_grid(node.left, gle, gre,
+#                      gid, rank, size)
+# 
+#     # Populate Right Node
+#     #print 'Inserting right node', node.left_edge, node.right_edge
+#     if greater_id == 1:
+#         insert_grid(node.right, gle, gre,
+#                      gid, rank, size)
+# 
+#     return 0
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
 cdef kdtree_get_choices(int n_grids, 
-                        np.ndarray[np.float64_t, ndim=3] data,
-                        np.ndarray[np.float64_t, ndim=1] l_corner,
-                        np.ndarray[np.float64_t, ndim=1] r_corner):
+                        np.float64_t ***data,
+                        np.float64_t *l_corner,
+                        np.float64_t *r_corner,
+                        np.uint8_t *less_ids,
+                        np.uint8_t *greater_ids,
+                       ):
+# cdef kdtree_get_choices(int n_grids, 
+#                         np.ndarray[np.float64_t, ndim=3] data,
+#                         np.ndarray[np.float64_t, ndim=1] l_corner,
+#                         np.ndarray[np.float64_t, ndim=1] r_corner):
     cdef int i, j, k, dim, n_unique, best_dim, n_best, addit, my_split
     cdef np.float64_t **uniquedims, *uniques, split
     uniquedims = <np.float64_t **> alloca(3 * sizeof(np.float64_t*))
@@ -341,19 +430,19 @@
             # Check for disqualification
             for j in range(2):
                 # print "Checking against", i,j,dim,data[i,j,dim]
-                if not (l_corner[dim] < data[i, j, dim] and
-                        data[i, j, dim] < r_corner[dim]):
+                if not (l_corner[dim] < data[i][j][dim] and
+                        data[i][j][dim] < r_corner[dim]):
                     # print "Skipping ", data[i,j,dim], l_corner[dim], r_corner[dim]
                     continue
                 skipit = 0
                 # Add our left ...
                 for k in range(n_unique):
-                    if uniques[k] == data[i, j, dim]:
+                    if uniques[k] == data[i][j][dim]:
                         skipit = 1
                         # print "Identified", uniques[k], data[i,j,dim], n_unique
                         break
                 if skipit == 0:
-                    uniques[n_unique] = data[i, j, dim]
+                    uniques[n_unique] = data[i][j][dim]
                     n_unique += 1
         if n_unique > my_max:
             best_dim = dim
@@ -366,19 +455,20 @@
         tarr[i] = uniquedims[best_dim][i]
     tarr.sort()
     split = tarr[my_split]
-    cdef np.ndarray[np.uint8_t, ndim=1] less_ids = np.empty(n_grids, dtype='uint8')
-    cdef np.ndarray[np.uint8_t, ndim=1] greater_ids = np.empty(n_grids, dtype='uint8')
+    cdef int nless=0, ngreater=0
     for i in range(n_grids):
-        if data[i, 0, best_dim] < split:
+        if data[i][0][best_dim] < split:
             less_ids[i] = 1
+            nless += 1
         else:
             less_ids[i] = 0
-        if data[i, 1, best_dim] > split:
+        if data[i][1][best_dim] > split:
             greater_ids[i] = 1
+            ngreater += 1
         else:
             greater_ids[i] = 0
     # Return out unique values
-    return best_dim, split, less_ids.view('bool'), greater_ids.view('bool')
+    return best_dim, split, nless, ngreater
 
 #@cython.boundscheck(True)
 #@cython.wraparound(False)
@@ -396,10 +486,21 @@
     le = get_left_edge(node)
     re = get_right_edge(node)
 
-    data = np.array([(gles[i,:], gres[i,:]) for i in
-        xrange(ngrids)], copy=False)
-    best_dim, split_pos, less_ids, greater_ids = \
-        kdtree_get_choices(ngrids, data, le, re)
+    data = <np.float64_t ***> malloc(ngrids * sizeof(np.float64_t**))
+    for i in range(ngrids):
+        data[i] = <np.float64_t **> malloc(2 * sizeof(np.float64_t*))
+        for j in range(2):
+            data[i][j] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+        for j in range(3):
+            data[i][0][j] = gles[i, j]
+            data[i][1][j] = gres[i, j]
+
+    less_ids = <np.uint8_t *> malloc(ngrids * sizeof(np.uint8_t))
+    greater_ids = <np.uint8_t *> malloc(ngrids * sizeof(np.uint8_t))
+
+    best_dim, split_pos, nless, ngreater = \
+        kdtree_get_choices(ngrids, data, node.left_edge, node.right_edge,
+                          less_ids, greater_ids)
  
     # If best_dim is -1, then we have found a place where there are no choices.
     # Exit out and set the node to None.
@@ -416,17 +517,54 @@
     # Create a Split
     divide(node, split)
 
-    nless = np.sum(less_ids)
-    ngreat = np.sum(greater_ids)
-    # Populate Left Node
-    #print 'Inserting left node', node.left_edge, node.right_edge
-    insert_grids(node.left, nless, gles[less_ids], gres[less_ids],
-                 gids[less_ids], rank, size)
+    cdef np.ndarray less_index = np.zeros(ngrids, dtype='int64')
+    cdef np.ndarray greater_index = np.zeros(ngrids, dtype='int64')
+   
+    nless = 0
+    ngreater = 0
+    for i in range(ngrids):
+        if less_ids[i] == 1:
+            less_index[nless] = i
+            nless += 1
 
-    # Populate Right Node
-    #print 'Inserting right node', node.left_edge, node.right_edge
-    insert_grids(node.right, ngreat, gles[greater_ids], gres[greater_ids],
-                 gids[greater_ids], rank, size)
+        if greater_ids[i] == 1:
+            greater_index[ngreater] = i
+            ngreater += 1
+
+    cdef np.ndarray less_gles = np.zeros([nless, 3], dtype='float64')
+    cdef np.ndarray less_gres = np.zeros([nless, 3], dtype='float64')
+    cdef np.ndarray l_ids = np.zeros(nless, dtype='int64')
+
+    cdef np.ndarray greater_gles = np.zeros([ngreater, 3], dtype='float64')
+    cdef np.ndarray greater_gres = np.zeros([ngreater, 3], dtype='float64')
+    cdef np.ndarray g_ids = np.zeros(ngreater, dtype='int64')
+
+    cdef int index
+    for i in range(nless):
+        index = less_index[i]
+        l_ids[i] = gids[index]
+        for j in range(3):
+            less_gles[i, j] = gles[index, j]
+            less_gres[i, j] = gres[index, j]
+
+    if nless > 0:
+        # Populate Left Node
+        #print 'Inserting left node', node.left_edge, node.right_edge
+        insert_grids(node.left, nless, less_gles, less_gres,
+                     l_ids, rank, size)
+
+    for i in range(ngreater):
+        index = greater_index[i]
+        g_ids[i] = gids[index]
+        for j in range(3):
+            greater_gles[i, j] = gles[index, j]
+            greater_gres[i, j] = gres[index, j]
+
+    if ngreater > 0:
+        # Populate Right Node
+        #print 'Inserting right node', node.left_edge, node.right_edge
+        insert_grids(node.right, ngreater, greater_gles, greater_gres,
+                     g_ids, rank, size)
 
     return 0
 
@@ -640,7 +778,7 @@
     with nodes furthest away from viewpoint.
     '''
 
-    current = tree.trunk
+    current = tree
     previous = None
     while current is not None:
         yield current


https://bitbucket.org/yt_analysis/yt/commits/699c480bfb50/
Changeset:   699c480bfb50
Branch:      yt
User:        samskillman
Date:        2013-06-05 21:24:20
Summary:     Move from ndarray to np.float64_t pointers. Now up to 10x over original.
Affected #:  3 files

diff -r aa34c033a0816641aa0249fc52e7c5df781004cd -r 699c480bfb50a241d28c18693e65551820e1caae yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -29,7 +29,7 @@
 from amr_kdtools import \
         receive_and_reduce, send_to_parent, scatter_image, find_node, \
         depth_first_touch
-from yt.utilities.lib.amr_kdtools import Node, add_grids, \
+from yt.utilities.lib.amr_kdtools import Node, add_pygrids, \
         kd_is_leaf, depth_traverse, viewpoint_traverse, kd_traverse, \
         get_left_edge, get_right_edge, kd_sum_volume, kd_node_check
 from yt.utilities.parallel_tools.parallel_analysis_interface \
@@ -92,7 +92,7 @@
                     gles = np.array([g.LeftEdge for g in grids])[gmask]
                     gres = np.array([g.RightEdge for g in grids])[gmask]
                     gids = np.array([g.id for g in grids])[gmask]
-                    add_grids(self.trunk, gids.size, gles, gres, gids, 
+                    add_pygrids(self.trunk, gids.size, gles, gres, gids, 
                               self.comm_rank,
                               self.comm_size)
                     grids_added += grids.size
@@ -108,7 +108,7 @@
             gres = np.array([g.RightEdge for g in grids if g.Level == lvl])
             gids = np.array([g.id for g in grids if g.Level == lvl])
 
-            add_grids(self.trunk, len(gids), gles, gres, gids, self.comm_rank, self.comm_size)
+            add_pygrids(self.trunk, len(gids), gles, gres, gids, self.comm_rank, self.comm_size)
             del gles, gres, gids
 
 

diff -r aa34c033a0816641aa0249fc52e7c5df781004cd -r 699c480bfb50a241d28c18693e65551820e1caae yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -26,13 +26,13 @@
 @cython.cdivision(True)
 cdef class Node:
 
-    cdef public Node left
-    cdef public Node right
-    cdef public Node parent
-    cdef public int grid
-    cdef public int node_id
-    cdef np.float64_t * left_edge
-    cdef np.float64_t * right_edge
+    cdef readonly Node left
+    cdef readonly Node right
+    cdef readonly Node parent
+    cdef readonly int grid
+    cdef readonly int node_id
+    cdef np.float64_t left_edge[3]
+    cdef np.float64_t right_edge[3]
     cdef public data
     cdef Split * split
 
@@ -48,8 +48,6 @@
         self.right = right
         self.parent = parent
         cdef int i
-        self.left_edge = <np.float64_t *> malloc(sizeof(np.float64_t) * 3)
-        self.right_edge = <np.float64_t *> malloc(sizeof(np.float64_t) * 3)
         for i in range(3):
             self.left_edge[i] = left_edge[i]
             self.right_edge[i] = right_edge[i]
@@ -99,13 +97,12 @@
 @cython.wraparound(False)
 @cython.cdivision(True)
 cdef int should_i_build(Node node, int rank, int size):
-    return 1
-    # if (node.node_id < size) or (node.node_id >= 2*size):
-    #     return 1 
-    # elif node.node_id - size == rank:
-    #     return 1 
-    # else:
-    #     return 0 
+    if (node.node_id < size) or (node.node_id >= 2*size):
+        return 1 
+    elif node.node_id - size == rank:
+        return 1 
+    else:
+        return 0 
 
 def kd_traverse(Node trunk, viewpoint=None):
     if viewpoint is None:
@@ -239,6 +236,35 @@
                     np.ndarray[np.int64_t, ndim=1] gids, 
                     int rank,
                     int size):
+    """
+    The entire purpose of this function is to move everything from ndarrays
+    to internal C pointers. 
+    """
+    pgles = <np.float64_t **> malloc(ngrids * sizeof(np.float64_t*))
+    pgres = <np.float64_t **> malloc(ngrids * sizeof(np.float64_t*))
+    pgids = <np.int64_t *> malloc(ngrids * sizeof(np.int64_t))
+    for i in range(ngrids):
+        pgles[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        pgres[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        pgids[i] = gids[i]
+        for j in range(3):
+            pgles[i][j] = gles[i, j]
+            pgres[i][j] = gres[i, j]
+
+    add_grids(node, ngrids, pgles, pgres, pgids, rank, size)
+
+
+ 
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
+cdef add_grids(Node node, 
+                    int ngrids,
+                    np.float64_t **gles, 
+                    np.float64_t **gres, 
+                    np.int64_t *gids, 
+                    int rank,
+                    int size):
     cdef int i, j, nless, ngreater
     cdef np.int64_t gid
     if not should_i_build(node, rank, size):
@@ -248,38 +274,44 @@
         insert_grids(node, ngrids, gles, gres, gids, rank, size)
         return
 
-    cdef np.ndarray less_ids = np.zeros(ngrids, dtype='int64')
-    cdef np.ndarray greater_ids = np.zeros(ngrids, dtype='int64')
+    less_ids= <np.int64_t *> malloc(ngrids * sizeof(np.int64_t))
+    greater_ids = <np.int64_t *> malloc(ngrids * sizeof(np.int64_t))
    
     nless = 0
     ngreater = 0
     for i in range(ngrids):
-        if gles[i, node.split.dim] < node.split.pos:
+        if gles[i][node.split.dim] < node.split.pos:
             less_ids[nless] = i
             nless += 1
             
-        if gres[i, node.split.dim] > node.split.pos:
+        if gres[i][node.split.dim] > node.split.pos:
             greater_ids[ngreater] = i
             ngreater += 1
 
     #print 'nless: %i' % nless
     #print 'ngreater: %i' % ngreater
 
-    cdef np.ndarray less_gles = np.zeros([nless, 3], dtype='float64')
-    cdef np.ndarray less_gres = np.zeros([nless, 3], dtype='float64')
-    cdef np.ndarray l_ids = np.zeros(nless, dtype='int64')
+    less_gles = <np.float64_t **> malloc(nless * sizeof(np.float64_t*))
+    less_gres = <np.float64_t **> malloc(nless * sizeof(np.float64_t*))
+    l_ids = <np.int64_t *> malloc(nless * sizeof(np.int64_t))
+    for i in range(nless):
+        less_gles[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        less_gres[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
 
-    cdef np.ndarray greater_gles = np.zeros([ngreater, 3], dtype='float64')
-    cdef np.ndarray greater_gres = np.zeros([ngreater, 3], dtype='float64')
-    cdef np.ndarray g_ids = np.zeros(ngreater, dtype='int64')
+    greater_gles = <np.float64_t **> malloc(ngreater * sizeof(np.float64_t*))
+    greater_gres = <np.float64_t **> malloc(ngreater * sizeof(np.float64_t*))
+    g_ids = <np.int64_t *> malloc(ngreater * sizeof(np.int64_t))
+    for i in range(ngreater):
+        greater_gles[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        greater_gres[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
 
     cdef int index
     for i in range(nless):
         index = less_ids[i]
         l_ids[i] = gids[index]
         for j in range(3):
-            less_gles[i, j] = gles[index, j]
-            less_gres[i, j] = gres[index, j]
+            less_gles[i][j] = gles[index][j]
+            less_gres[i][j] = gres[index][j]
 
     if nless > 0:
         add_grids(node.left, nless, less_gles, less_gres,
@@ -289,8 +321,8 @@
         index = greater_ids[i]
         g_ids[i] = gids[index]
         for j in range(3):
-            greater_gles[i, j] = gles[index, j]
-            greater_gres[i, j] = gres[index, j]
+            greater_gles[i][j] = gles[index][j]
+            greater_gres[i][j] = gres[index][j]
 
     if ngreater > 0:
         add_grids(node.right, ngreater, greater_gles, greater_gres,
@@ -310,9 +342,9 @@
 @cython.cdivision(True)
 cdef void insert_grids(Node node, 
                        int ngrids,
-                       np.ndarray[np.float64_t, ndim=2] gles, 
-                       np.ndarray[np.float64_t, ndim=2] gres, 
-                       np.ndarray[np.int64_t, ndim=1] gids, 
+                       np.float64_t **gles, 
+                       np.float64_t **gres, 
+                       np.int64_t *gids, 
                        int rank,
                        int size):
     
@@ -328,8 +360,8 @@
         #    return
 
         for i in range(3):
-            contained *= gles[0, i] <= node.left_edge[i]
-            contained *= gres[0, i] >= node.right_edge[i]
+            contained *= gles[0][i] <= node.left_edge[i]
+            contained *= gres[0][i] >= node.right_edge[i]
     
         if contained == 1:
             # print 'Node fully contained, setting to grid: %i' % gids[0]
@@ -470,30 +502,27 @@
     # Return out unique values
     return best_dim, split, nless, ngreater
 
-#@cython.boundscheck(True)
+#@cython.boundscheck(False)
 #@cython.wraparound(False)
 #@cython.cdivision(True)
 cdef int split_grids(Node node, 
                        int ngrids,
-                       np.ndarray[np.float64_t, ndim=2] gles, 
-                       np.ndarray[np.float64_t, ndim=2] gres, 
-                       np.ndarray[np.int64_t, ndim=1] gids, 
+                       np.float64_t **gles, 
+                       np.float64_t **gres, 
+                       np.int64_t *gids, 
                        int rank,
                        int size):
     # Find a Split
     cdef int i, j, k
 
-    le = get_left_edge(node)
-    re = get_right_edge(node)
-
     data = <np.float64_t ***> malloc(ngrids * sizeof(np.float64_t**))
     for i in range(ngrids):
         data[i] = <np.float64_t **> malloc(2 * sizeof(np.float64_t*))
         for j in range(2):
             data[i][j] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
         for j in range(3):
-            data[i][0][j] = gles[i, j]
-            data[i][1][j] = gres[i, j]
+            data[i][0][j] = gles[i][j]
+            data[i][1][j] = gres[i][j]
 
     less_ids = <np.uint8_t *> malloc(ngrids * sizeof(np.uint8_t))
     greater_ids = <np.uint8_t *> malloc(ngrids * sizeof(np.uint8_t))
@@ -517,8 +546,8 @@
     # Create a Split
     divide(node, split)
 
-    cdef np.ndarray less_index = np.zeros(ngrids, dtype='int64')
-    cdef np.ndarray greater_index = np.zeros(ngrids, dtype='int64')
+    less_index = <np.int64_t *> malloc(ngrids * sizeof(np.int64_t))
+    greater_index = <np.int64_t *> malloc(ngrids * sizeof(np.int64_t))
    
     nless = 0
     ngreater = 0
@@ -531,21 +560,27 @@
             greater_index[ngreater] = i
             ngreater += 1
 
-    cdef np.ndarray less_gles = np.zeros([nless, 3], dtype='float64')
-    cdef np.ndarray less_gres = np.zeros([nless, 3], dtype='float64')
-    cdef np.ndarray l_ids = np.zeros(nless, dtype='int64')
+    less_gles = <np.float64_t **> malloc(nless * sizeof(np.float64_t*))
+    less_gres = <np.float64_t **> malloc(nless * sizeof(np.float64_t*))
+    l_ids = <np.int64_t *> malloc(nless * sizeof(np.int64_t))
+    for i in range(nless):
+        less_gles[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        less_gres[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
 
-    cdef np.ndarray greater_gles = np.zeros([ngreater, 3], dtype='float64')
-    cdef np.ndarray greater_gres = np.zeros([ngreater, 3], dtype='float64')
-    cdef np.ndarray g_ids = np.zeros(ngreater, dtype='int64')
+    greater_gles = <np.float64_t **> malloc(ngreater * sizeof(np.float64_t*))
+    greater_gres = <np.float64_t **> malloc(ngreater * sizeof(np.float64_t*))
+    g_ids = <np.int64_t *> malloc(ngreater * sizeof(np.int64_t))
+    for i in range(ngreater):
+        greater_gles[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        greater_gres[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
 
     cdef int index
     for i in range(nless):
         index = less_index[i]
         l_ids[i] = gids[index]
         for j in range(3):
-            less_gles[i, j] = gles[index, j]
-            less_gres[i, j] = gres[index, j]
+            less_gles[i][j] = gles[index][j]
+            less_gres[i][j] = gres[index][j]
 
     if nless > 0:
         # Populate Left Node
@@ -557,8 +592,8 @@
         index = greater_index[i]
         g_ids[i] = gids[index]
         for j in range(3):
-            greater_gles[i, j] = gles[index, j]
-            greater_gres[i, j] = gres[index, j]
+            greater_gles[i][j] = gles[index][j]
+            greater_gres[i][j] = gres[index][j]
 
     if ngreater > 0:
         # Populate Right Node
@@ -634,28 +669,25 @@
     # Create a Split
     node.split = split
     
-    lle = np.zeros(3, dtype='float64')
-    lre = np.zeros(3, dtype='float64')
-    rle = np.zeros(3, dtype='float64')
-    rre = np.zeros(3, dtype='float64')
+    cdef np.ndarray[np.float64_t, ndim=1] le = np.zeros(3, dtype='float64')
+    cdef np.ndarray[np.float64_t, ndim=1] re = np.zeros(3, dtype='float64')
 
     cdef int i
     for i in range(3):
-        lle[i] = node.left_edge[i]
-        lre[i] = node.right_edge[i]
-        rle[i] = node.left_edge[i]
-        rre[i] = node.right_edge[i]
-    lre[split.dim] = split.pos
-    rle[split.dim] = split.pos
+        le[i] = node.left_edge[i]
+        re[i] = node.right_edge[i]
+    re[split.dim] = split.pos
 
     node.left = Node(node, None, None,
-            lle.copy(), lre.copy(), node.grid,
+                     le, re, node.grid,
                      _lchild_id(node.node_id))
 
+    re[split.dim] = node.right_edge[split.dim]
+    le[split.dim] = split.pos
     node.right = Node(node, None, None,
-            rle.copy(), rre.copy(), node.grid,
+                      le, re, node.grid,
                       _rchild_id(node.node_id))
-    
+
     return
 # 
 def kd_sum_volume(Node node):
@@ -668,16 +700,6 @@
         return vol 
     else:
         return kd_sum_volume(node.left) + kd_sum_volume(node.right)
-# 
-# def kd_sum_cells(node):
-#     if (node.left is None) and (node.right is None):
-#         if node.grid is None:
-#             return 0.0
-#         return np.prod(node.right_edge - node.left_edge)
-#     else:
-#         return kd_sum_volume(node.left) + kd_sum_volume(node.right)
-# 
-# 
 
 def kd_node_check(Node node):
     assert (node.left is None) == (node.right is None)
@@ -688,7 +710,6 @@
     else:
         return kd_node_check(node.left)+kd_node_check(node.right)
 
-
 def kd_is_leaf(Node node):
     cdef int has_l_child = node.left == None
     cdef int has_r_child = node.right == None
@@ -741,37 +762,37 @@
         if current.node_id >= max_node:
             current = current.parent
             previous = current.right
-# 
-# def depth_first_touch(tree, max_node=None):
-#     '''
-#     Yields a depth-first traversal of the kd tree always going to
-#     the left child before the right.
-#     '''
-#     current = tree.trunk
-#     previous = None
-#     if max_node is None:
-#         max_node = np.inf
-#     while current is not None:
-#         if previous is None or previous.parent != current:
-#             yield current
-#         current, previous = step_depth(current, previous)
-#         if current is None: break
-#         if current.id >= max_node:
-#             current = current.parent
-#             previous = current.right
-# 
-# def breadth_traverse(tree):
-#     '''
-#     Yields a breadth-first traversal of the kd tree always going to
-#     the left child before the right.
-#     '''
-#     current = tree.trunk
-#     previous = None
-#     while current is not None:
-#         yield current
-#         current, previous = step_depth(current, previous)
-# 
-# 
+
+def depth_first_touch(tree, max_node=None):
+    '''
+    Yields a depth-first traversal of the kd tree always going to
+    the left child before the right.
+    '''
+    current = tree.trunk
+    previous = None
+    if max_node is None:
+        max_node = np.inf
+    while current is not None:
+        if previous is None or previous.parent != current:
+            yield current
+        current, previous = step_depth(current, previous)
+        if current is None: break
+        if current.id >= max_node:
+            current = current.parent
+            previous = current.right
+
+def breadth_traverse(tree):
+    '''
+    Yields a breadth-first traversal of the kd tree always going to
+    the left child before the right.
+    '''
+    current = tree.trunk
+    previous = None
+    while current is not None:
+        yield current
+        current, previous = step_depth(current, previous)
+
+
 def viewpoint_traverse(tree, viewpoint):
     '''
     Yields a viewpoint dependent traversal of the kd-tree.  Starts
@@ -832,57 +853,28 @@
             current = current.parent
 
     return current, previous
-# 
-# 
-# def receive_and_reduce(comm, incoming_rank, image, add_to_front):
-#     mylog.debug( 'Receiving image from %04i' % incoming_rank)
-#     #mylog.debug( '%04i receiving image from %04i'%(self.comm.rank,back.owner))
-#     arr2 = comm.recv_array(incoming_rank, incoming_rank).reshape(
-#         (image.shape[0], image.shape[1], image.shape[2]))
-# 
-#     if add_to_front:
-#         front = arr2
-#         back = image
-#     else:
-#         front = image
-#         back = arr2
-# 
-#     if image.shape[2] == 3:
-#         # Assume Projection Camera, Add
-#         np.add(image, front, image)
-#         return image
-# 
-#     ta = 1.0 - front[:,:,3]
-#     np.maximum(ta, 0.0, ta)
-#     # This now does the following calculation, but in a memory
-#     # conservative fashion
-#     # image[:,:,i  ] = front[:,:,i] + ta*back[:,:,i]
-#     image = back.copy()
-#     for i in range(4):
-#         np.multiply(image[:,:,i], ta, image[:,:,i])
-#     np.add(image, front, image)
-#     return image
-# 
-# def send_to_parent(comm, outgoing_rank, image):
-#     mylog.debug( 'Sending image to %04i' % outgoing_rank)
-#     comm.send_array(image, outgoing_rank, tag=comm.rank)
-# 
-# def scatter_image(comm, root, image):
-#     mylog.debug( 'Scattering from %04i' % root)
-#     image = comm.mpi_bcast(image, root=root)
-#     return image
-# 
-# def find_node(node, pos):
-#     """
-#     Find the AMRKDTree node enclosing a position
-#     """
-#     assert(np.all(node.left_edge <= pos))
-#     assert(np.all(node.right_edge > pos))
-#     while not kd_is_leaf(node):
-#         if pos[node.split.dim] < node.split.pos:
-#             node = node.left
-#         else:
-#             node = node.right
-#     return node
 
+cdef int point_in_node(Node node, 
+                       np.ndarray[np.float64_t, ndim=1] point):
+    cdef int i
+    cdef int inside = 1
+    for i in range(3):
+        inside *= node.left_edge[i] <= point[i]
+        inside *= node.right_edge[i] > point[i]
+    return inside
 
+
+def find_node(Node node,
+              np.ndarray[np.float64_t, ndim=1] point):
+    """
+    Find the AMRKDTree node enclosing a position
+    """
+    assert(point_in_node(node, point))
+    while not kd_is_leaf(node):
+        if point[node.split.dim] < node.split.pos:
+            node = node.left
+        else:
+            node = node.right
+    return node
+
+

diff -r aa34c033a0816641aa0249fc52e7c5df781004cd -r 699c480bfb50a241d28c18693e65551820e1caae yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -251,7 +251,6 @@
         libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
     config.add_extension("amr_kdtools", 
                          ["yt/utilities/lib/amr_kdtools.pyx"],
-                         extra_compile_args=['-O3'],
                          libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
 
     if os.environ.get("GPERFTOOLS", "no").upper() != "NO":


https://bitbucket.org/yt_analysis/yt/commits/8587b273c9b8/
Changeset:   8587b273c9b8
Branch:      yt
User:        samskillman
Date:        2013-06-05 21:40:50
Summary:     Adding back in single grid adds through a add_pygrid wrapper.
Affected #:  1 file

diff -r 699c480bfb50a241d28c18693e65551820e1caae -r 8587b273c9b851b2b0ca01601bd5d8813c9dd062 yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -114,68 +114,18 @@
             if kd_is_leaf(node) and node.grid != -1:
                 yield node
 
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef add_grid(Node node, 
+                   np.float64_t *gle, 
+                   np.float64_t *gre, 
+                   int gid, 
+                   int rank,
+                   int size):
 
-# @cython.boundscheck(False)
-# @cython.wraparound(False)
-# @cython.cdivision(True)
-# def add_grid(Node node, 
-#                    np.ndarray[np.float64_t, ndim=1] gle, 
-#                    np.ndarray[np.float64_t, ndim=1] gre, 
-#                    int gid, 
-#                    int rank,
-#                    int size):
-# 
-#     if not should_i_build(node, rank, size):
-#         return
-# 
-#     if kd_is_leaf(node):
-#         insert_grid(node, gle, gre, gid, rank, size)
-#     else:
-#         less_id = gle[node.split.dim] < node.split.pos
-#         if less_id:
-#             add_grid(node.left, gle, gre,
-#                      gid, rank, size)
-# 
-#         greater_id = gre[node.split.dim] > node.split.pos
-#         if greater_id:
-#             add_grid(node.right, gle, gre,
-#                      gid, rank, size)
-#     return
-
-# @cython.boundscheck(False)
-# @cython.wraparound(False)
-# @cython.cdivision(True)
-# def insert_grid(Node node, 
-#                       np.ndarray[np.float64_t, ndim=1] gle, 
-#                       np.ndarray[np.float64_t, ndim=1] gre, 
-#                       int grid_id, 
-#                       int rank,
-#                       int size):
-#     if not should_i_build(node, rank, size):
-#         return
-# 
-#     # If we should continue to split based on parallelism, do so!
-#     # if should_i_split(node, rank, size):
-#     #     geo_split(node, gle, gre, grid_id, rank, size)
-#     #     return
-#     cdef int contained = 1
-#     for i in range(3):
-#         if gle[i] > node.left_edge[i] or\
-#            gre[i] < node.right_edge[i]:
-#             contained *= 0
-# 
-#     if contained == 1:
-#         node.grid = grid_id 
-#         assert(node.grid != -1)
-#         return
-# 
-#     # Split the grid
-#     cdef int check = split_grid(node, gle, gre, grid_id, rank, size)
-#     # If check is -1, then we have found a place where there are no choices.
-#     # Exit out and set the node to None.
-#     if check == -1:
-#         node.grid = -1 
-#     return
+    if not should_i_build(node, rank, size):
+        return
 
     if kd_is_leaf(node):
         insert_grid(node, gle, gre, gid, rank, size)
@@ -191,15 +141,35 @@
                      gid, rank, size)
     return
 
+def add_pygrid(Node node, 
+                   np.ndarray[np.float64_t, ndim=1] gle, 
+                   np.ndarray[np.float64_t, ndim=1] gre, 
+                   int gid, 
+                   int rank,
+                   int size):
+
+    """
+    The entire purpose of this function is to move everything from ndarrays
+    to internal C pointers. 
+    """
+    pgles = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+    pgres = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+    cdef int j
+    for j in range(3):
+        pgles[j] = gle[j]
+        pgres[j] = gre[j]
+
+    add_grid(node, pgles, pgres, gid, rank, size)
+
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-def insert_grid(Node node, 
-                      np.ndarray[np.float64_t, ndim=1] gle, 
-                      np.ndarray[np.float64_t, ndim=1] gre, 
-                      int grid_id, 
-                      int rank,
-                      int size):
+cdef insert_grid(Node node, 
+                np.float64_t *gle, 
+                np.float64_t *gre, 
+                int grid_id, 
+                int rank,
+                int size):
     if not should_i_build(node, rank, size):
         return
 
@@ -229,7 +199,7 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-def add_grids(Node node, 
+def add_pygrids(Node node, 
                     int ngrids,
                     np.ndarray[np.float64_t, ndim=2] gles, 
                     np.ndarray[np.float64_t, ndim=2] gres, 
@@ -377,60 +347,61 @@
         node.grid = -1
     return
 
-# @cython.boundscheck(False)
-# @cython.wraparound(False)
-# @cython.cdivision(True)
-# def split_grid(Node node, 
-#                np.ndarray[np.float64_t, ndim=1] gle, 
-#                np.ndarray[np.float64_t, ndim=1] gre, 
-#                int gid, 
-#                int rank,
-#                int size):
-#     # Find a Split
-#     data = np.empty((1, 2, 3), dtype='float64')
-#     for i in range(3):
-#         data[0, 0, i] = gle[i]
-#         data[0, 1, i] = gre[i]
-#         # print 'Single Data: ', gle[i], gre[i]
-# 
-#     le = np.empty(3)
-#     re = np.empty(3)
-#     for i in range(3):
-#         le[i] = node.left_edge[i]
-#         re[i] = node.right_edge[i]
-# 
-#     best_dim, split_pos, less_id, greater_id = \
-#         kdtree_get_choices(1, data, le, re)
-# 
-#     # If best_dim is -1, then we have found a place where there are no choices.
-#     # Exit out and set the node to None.
-#     if best_dim == -1:
-#         print 'Failed to split grid.'
-#         return -1
-# 
-#         
-#     split = <Split *> malloc(sizeof(Split))
-#     split.dim = best_dim
-#     split.pos = split_pos
-# 
-#     #del data
-# 
-#     # Create a Split
-#     divide(node, split)
-# 
-#     # Populate Left Node
-#     #print 'Inserting left node', node.left_edge, node.right_edge
-#     if less_id == 1:
-#         insert_grid(node.left, gle, gre,
-#                      gid, rank, size)
-# 
-#     # Populate Right Node
-#     #print 'Inserting right node', node.left_edge, node.right_edge
-#     if greater_id == 1:
-#         insert_grid(node.right, gle, gre,
-#                      gid, rank, size)
-# 
-#     return 0
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef split_grid(Node node, 
+               np.float64_t *gle, 
+               np.float64_t *gre, 
+               int gid, 
+               int rank,
+               int size):
+
+    cdef int j
+    data = <np.float64_t ***> malloc(sizeof(np.float64_t**))
+    data[0] = <np.float64_t **> malloc(2 * sizeof(np.float64_t*))
+    for j in range(2):
+        data[0][j] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+    for j in range(3):
+        data[0][0][j] = gle[j]
+        data[0][1][j] = gre[j]
+
+    less_ids = <np.uint8_t *> malloc(1 * sizeof(np.uint8_t))
+    greater_ids = <np.uint8_t *> malloc(1 * sizeof(np.uint8_t))
+
+    best_dim, split_pos, nless, ngreater = \
+        kdtree_get_choices(1, data, node.left_edge, node.right_edge,
+                          less_ids, greater_ids)
+
+    # If best_dim is -1, then we have found a place where there are no choices.
+    # Exit out and set the node to None.
+    if best_dim == -1:
+        print 'Failed to split grid.'
+        return -1
+
+        
+    split = <Split *> malloc(sizeof(Split))
+    split.dim = best_dim
+    split.pos = split_pos
+
+    #del data
+
+    # Create a Split
+    divide(node, split)
+
+    # Populate Left Node
+    #print 'Inserting left node', node.left_edge, node.right_edge
+    if nless == 1:
+        insert_grid(node.left, gle, gre,
+                     gid, rank, size)
+
+    # Populate Right Node
+    #print 'Inserting right node', node.left_edge, node.right_edge
+    if ngreater == 1:
+        insert_grid(node.right, gle, gre,
+                     gid, rank, size)
+
+    return 0
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
@@ -442,10 +413,6 @@
                         np.uint8_t *less_ids,
                         np.uint8_t *greater_ids,
                        ):
-# cdef kdtree_get_choices(int n_grids, 
-#                         np.ndarray[np.float64_t, ndim=3] data,
-#                         np.ndarray[np.float64_t, ndim=1] l_corner,
-#                         np.ndarray[np.float64_t, ndim=1] r_corner):
     cdef int i, j, k, dim, n_unique, best_dim, n_best, addit, my_split
     cdef np.float64_t **uniquedims, *uniques, split
     uniquedims = <np.float64_t **> alloca(3 * sizeof(np.float64_t*))


https://bitbucket.org/yt_analysis/yt/commits/4f019273c7c5/
Changeset:   4f019273c7c5
Branch:      yt
User:        samskillman
Date:        2013-06-05 22:02:17
Summary:     Updating the tests, and now they pass!  This does change the api for traversing
a tree, but that's not in the API, so I think this is ok.  No one should be
directly accessing that.
Affected #:  2 files

diff -r 8587b273c9b851b2b0ca01601bd5d8813c9dd062 -r 4f019273c7c518fe90b5b482d7341e6e5260cea0 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -140,19 +140,20 @@
 
     def sum_cells(self, all_cells=False):
         cells = 0
-        for node in depth_traverse(self):
-            if node.grid != -1:
+        for node in depth_traverse(self.trunk):
+            if node.grid == -1:
                 continue
             if not all_cells and not kd_is_leaf(node):
                 continue
             grid = self.pf.h.grids[node.grid - self._id_offset]
             dds = grid.dds
             gle = grid.LeftEdge
-            li = np.rint((node.left_edge-gle)/dds).astype('int32')
-            ri = np.rint((node.right_edge-gle)/dds).astype('int32')
+            nle = get_left_edge(node)
+            nre = get_right_edge(node)
+            li = np.rint((nle-gle)/dds).astype('int32')
+            ri = np.rint((nre-gle)/dds).astype('int32')
             dims = (ri - li).astype('int32')
             cells += np.prod(dims)
-
         return cells
 
 class AMRKDTree(ParallelAnalysisInterface):

diff -r 8587b273c9b851b2b0ca01601bd5d8813c9dd062 -r 4f019273c7c518fe90b5b482d7341e6e5260cea0 yt/utilities/tests/test_amr_kdtree.py
--- a/yt/utilities/tests/test_amr_kdtree.py
+++ b/yt/utilities/tests/test_amr_kdtree.py
@@ -24,7 +24,8 @@
 """
 
 from yt.utilities.amr_kdtree.api import AMRKDTree
-from yt.utilities.amr_kdtree.amr_kdtools import depth_traverse
+from yt.utilities.lib.amr_kdtools import depth_traverse, \
+        get_left_edge, get_right_edge
 import yt.utilities.initial_conditions as ic
 import yt.utilities.flagging_methods as fm
 from yt.frontends.stream.api import load_uniform_grid, refine_amr
@@ -53,17 +54,19 @@
 
     # This largely reproduces the AMRKDTree.tree.check_tree() functionality
     tree_ok = True
-    for node in depth_traverse(kd.tree):
+    for node in depth_traverse(kd.tree.trunk):
         if node.grid is None:
             continue
         grid = pf.h.grids[node.grid - kd._id_offset]
         dds = grid.dds
         gle = grid.LeftEdge
-        li = np.rint((node.left_edge-gle)/dds).astype('int32')
-        ri = np.rint((node.right_edge-gle)/dds).astype('int32')
+        nle = get_left_edge(node)
+        nre = get_right_edge(node)
+        li = np.rint((nle-gle)/dds).astype('int32')
+        ri = np.rint((nre-gle)/dds).astype('int32')
         dims = (ri - li).astype('int32')
-        tree_ok *= np.all(grid.LeftEdge <= node.left_edge)
-        tree_ok *= np.all(grid.RightEdge >= node.right_edge)
+        tree_ok *= np.all(grid.LeftEdge <= nle)
+        tree_ok *= np.all(grid.RightEdge >= nre)
         tree_ok *= np.all(dims > 0)
 
     yield assert_equal, True, tree_ok


https://bitbucket.org/yt_analysis/yt/commits/1833903a59d2/
Changeset:   1833903a59d2
Branch:      yt
User:        samskillman
Date:        2013-06-05 23:45:38
Summary:     Clean up the old tools file, removing all the stuff that got moved to cython
and give it a PEP talk.
Affected #:  1 file

diff -r 4f019273c7c518fe90b5b482d7341e6e5260cea0 -r 1833903a59d29bddfdb79af67c209763db45221e yt/utilities/amr_kdtree/amr_kdtools.py
--- a/yt/utilities/amr_kdtree/amr_kdtools.py
+++ b/yt/utilities/amr_kdtree/amr_kdtools.py
@@ -1,5 +1,5 @@
 """
-AMR kD-Tree Tools 
+AMR kD-Tree Tools
 
 Authors: Samuel Skillman <samskillman at gmail.com>
 Affiliation: University of Colorado at Boulder
@@ -25,383 +25,10 @@
 """
 import numpy as np
 from yt.funcs import *
-from yt.utilities.lib import kdtree_get_choices
-from yt.utilities.lib.amr_kdtools import kd_is_leaf
-
-def _lchild_id(node_id): return (node_id<<1)
-def _rchild_id(node_id): return (node_id<<1) + 1
-def _parent_id(node_id): return (node_id-1) >> 1
-
-class Node(object):
-    def __init__(self, parent, left, right,
-            left_edge, right_edge, grid_id, node_id):
-        self.left = left
-        self.right = right
-        self.left_edge = left_edge
-        self.right_edge = right_edge
-        self.grid = grid_id
-        self.parent = parent
-        self.id = node_id
-        self.data = None
-        self.split = None
-
-class Split(object):
-    def __init__(self, dim, pos):
-        self.dim = dim
-        self.pos = pos
-
-def should_i_build(node, rank, size):
-    if (node.id < size) or (node.id >= 2*size):
-        return True
-    elif node.id - size == rank:
-        return True
-    else:
-        return False
-
-
-def add_grid(node, gle, gre, gid, rank, size):
-    if not should_i_build(node, rank, size):
-        return
-
-    if kd_is_leaf(node):
-        insert_grid(node, gle, gre, gid, rank, size)
-    else:
-        less_id = gle[node.split.dim] < node.split.pos
-        if less_id:
-            add_grid(node.left, gle, gre,
-                     gid, rank, size)
-
-        greater_id = gre[node.split.dim] > node.split.pos
-        if greater_id:
-            add_grid(node.right, gle, gre,
-                     gid, rank, size)
-
-
-def insert_grid(node, gle, gre, grid_id, rank, size):
-    if not should_i_build(node, rank, size):
-        return
-
-    # If we should continue to split based on parallelism, do so!
-    if should_i_split(node, rank, size):
-        geo_split(node, gle, gre, grid_id, rank, size)
-        return
-
-    if np.all(gle <= node.left_edge) and \
-            np.all(gre >= node.right_edge):
-        node.grid = grid_id
-        assert(node.grid is not None)
-        return
-
-    # Split the grid
-    check = split_grid(node, gle, gre, grid_id, rank, size)
-    # If check is -1, then we have found a place where there are no choices.
-    # Exit out and set the node to None.
-    if check == -1:
-        node.grid = None
-    return
-
-
-def add_grids(node, gles, gres, gids, rank, size):
-    if not should_i_build(node, rank, size):
-        return
-
-    if kd_is_leaf(node):
-        insert_grids(node, gles, gres, gids, rank, size)
-    else:
-        less_ids = gles[:,node.split.dim] < node.split.pos
-        if len(less_ids) > 0:
-            add_grids(node.left, gles[less_ids], gres[less_ids],
-                      gids[less_ids], rank, size)
-
-        greater_ids = gres[:,node.split.dim] > node.split.pos
-        if len(greater_ids) > 0:
-            add_grids(node.right, gles[greater_ids], gres[greater_ids],
-                      gids[greater_ids], rank, size)
-
-
-def should_i_split(node, rank, size):
-    return node.id < size
-
-
-def geo_split_grid(node, gle, gre, grid_id, rank, size):
-    big_dim = np.argmax(gre-gle)
-    new_pos = (gre[big_dim] + gle[big_dim])/2.
-    old_gre = gre.copy()
-    new_gle = gle.copy()
-    new_gle[big_dim] = new_pos
-    gre[big_dim] = new_pos
-
-    split = Split(big_dim, new_pos)
-
-    # Create a Split
-    divide(node, split)
-
-    # Populate Left Node
-    #print 'Inserting left node', node.left_edge, node.right_edge
-    insert_grid(node.left, gle, gre,
-                grid_id, rank, size)
-
-    # Populate Right Node
-    #print 'Inserting right node', node.left_edge, node.right_edge
-    insert_grid(node.right, new_gle, old_gre,
-                grid_id, rank, size)
-    return
-
-
-def geo_split(node, gles, gres, grid_ids, rank, size):
-    big_dim = np.argmax(gres[0]-gles[0])
-    new_pos = (gres[0][big_dim] + gles[0][big_dim])/2.
-    old_gre = gres[0].copy()
-    new_gle = gles[0].copy()
-    new_gle[big_dim] = new_pos
-    gres[0][big_dim] = new_pos
-    gles = np.append(gles, np.array([new_gle]), axis=0)
-    gres = np.append(gres, np.array([old_gre]), axis=0)
-    grid_ids = np.append(grid_ids, grid_ids, axis=0)
-
-    split = Split(big_dim, new_pos)
-
-    # Create a Split
-    divide(node, split)
-
-    # Populate Left Node
-    #print 'Inserting left node', node.left_edge, node.right_edge
-    insert_grids(node.left, gles[:1], gres[:1],
-            grid_ids[:1], rank, size)
-
-    # Populate Right Node
-    #print 'Inserting right node', node.left_edge, node.right_edge
-    insert_grids(node.right, gles[1:], gres[1:],
-            grid_ids[1:], rank, size)
-    return
-
-def insert_grids(node, gles, gres, grid_ids, rank, size):
-    if not should_i_build(node, rank, size) or grid_ids.size == 0:
-        return
-
-    if len(grid_ids) == 1:
-        # If we should continue to split based on parallelism, do so!
-        if should_i_split(node, rank, size):
-            geo_split(node, gles, gres, grid_ids, rank, size)
-            return
-
-        if np.all(gles[0] <= node.left_edge) and \
-                np.all(gres[0] >= node.right_edge):
-            node.grid = grid_ids[0]
-            assert(node.grid is not None)
-            return
-
-    # Split the grids
-    check = split_grids(node, gles, gres, grid_ids, rank, size)
-    # If check is -1, then we have found a place where there are no choices.
-    # Exit out and set the node to None.
-    if check == -1:
-        node.grid = None
-    return
-
-def split_grid(node, gle, gre, grid_id, rank, size):
-    # Find a Split
-    data = np.array([(gle[:], gre[:])],  copy=False)
-    best_dim, split_pos, less_id, greater_id = \
-        kdtree_get_choices(data, node.left_edge, node.right_edge)
-
-    # If best_dim is -1, then we have found a place where there are no choices.
-    # Exit out and set the node to None.
-    if best_dim == -1:
-        return -1
-
-    split = Split(best_dim, split_pos)
-
-    del data, best_dim, split_pos
-
-    # Create a Split
-    divide(node, split)
-
-    # Populate Left Node
-    #print 'Inserting left node', node.left_edge, node.right_edge
-    if less_id:
-        insert_grid(node.left, gle, gre,
-                     grid_id, rank, size)
-
-    # Populate Right Node
-    #print 'Inserting right node', node.left_edge, node.right_edge
-    if greater_id:
-        insert_grid(node.right, gle, gre,
-                     grid_id, rank, size)
-
-    return
-
-
-def split_grids(node, gles, gres, grid_ids, rank, size):
-    # Find a Split
-    data = np.array([(gles[i,:], gres[i,:]) for i in
-        xrange(grid_ids.shape[0])], copy=False)
-    best_dim, split_pos, less_ids, greater_ids = \
-        kdtree_get_choices(data, node.left_edge, node.right_edge)
-
-    # If best_dim is -1, then we have found a place where there are no choices.
-    # Exit out and set the node to None.
-    if best_dim == -1:
-        return -1
-
-    split = Split(best_dim, split_pos)
-
-    del data, best_dim, split_pos
-
-    # Create a Split
-    divide(node, split)
-
-    # Populate Left Node
-    #print 'Inserting left node', node.left_edge, node.right_edge
-    insert_grids(node.left, gles[less_ids], gres[less_ids],
-                 grid_ids[less_ids], rank, size)
-
-    # Populate Right Node
-    #print 'Inserting right node', node.left_edge, node.right_edge
-    insert_grids(node.right, gles[greater_ids], gres[greater_ids],
-                 grid_ids[greater_ids], rank, size)
-
-    return
-
-def new_right(Node, split):
-    new_right = Node.right_edge.copy()
-    new_right[split.dim] = split.pos
-    return new_right
-
-def new_left(Node, split):
-    new_left = Node.left_edge.copy()
-    new_left[split.dim] = split.pos
-    return new_left
-
-def divide(node, split):
-    # Create a Split
-    node.split = split
-    node.left = Node(node, None, None,
-            node.left_edge, new_right(node, split), node.grid,
-                     _lchild_id(node.id))
-    node.right = Node(node, None, None,
-            new_left(node, split), node.right_edge, node.grid,
-                      _rchild_id(node.id))
-    return
-
-def kd_sum_volume(node):
-    if (node.left is None) and (node.right is None):
-        if node.grid is None:
-            return 0.0
-        return np.prod(node.right_edge - node.left_edge)
-    else:
-        return kd_sum_volume(node.left) + kd_sum_volume(node.right)
-
-def kd_sum_cells(node):
-    if (node.left is None) and (node.right is None):
-        if node.grid is None:
-            return 0.0
-        return np.prod(node.right_edge - node.left_edge)
-    else:
-        return kd_sum_volume(node.left) + kd_sum_volume(node.right)
-
-
-def kd_node_check(node):
-    assert (node.left is None) == (node.right is None)
-    if (node.left is None) and (node.right is None):
-        if node.grid is not None:
-            return np.prod(node.right_edge - node.left_edge)
-        else: return 0.0
-    else:
-        return kd_node_check(node.left)+kd_node_check(node.right)
-
-def depth_first_touch(tree, max_node=None):
-    '''
-    Yields a depth-first traversal of the kd tree always going to
-    the left child before the right.
-    '''
-    current = tree.trunk
-    previous = None
-    if max_node is None:
-        max_node = np.inf
-    while current is not None:
-        if previous is None or previous.parent != current:
-            yield current
-        current, previous = step_depth(current, previous)
-        if current is None: break
-        if current.id >= max_node:
-            current = current.parent
-            previous = current.right
-
-def breadth_traverse(tree):
-    '''
-    Yields a breadth-first traversal of the kd tree always going to
-    the left child before the right.
-    '''
-    current = tree.trunk
-    previous = None
-    while current is not None:
-        yield current
-        current, previous = step_depth(current, previous)
-
-
-# def viewpoint_traverse(tree, viewpoint):
-#     '''
-#     Yields a viewpoint dependent traversal of the kd-tree.  Starts
-#     with nodes furthest away from viewpoint.
-#     '''
-# 
-#     current = tree.trunk
-#     previous = None
-#     while current is not None:
-#         yield current
-#         current, previous = step_viewpoint(current, previous, viewpoint)
-# 
-# def step_viewpoint(current, previous, viewpoint):
-#     '''
-#     Takes a single step in the viewpoint based traversal.  Always
-#     goes to the node furthest away from viewpoint first.
-#     '''
-#     if kd_is_leaf(current): # At a leaf, move back up
-#         previous = current
-#         current = current.parent
-#     elif current.split.dim is None: # This is a dead node
-#         previous = current
-#         current = current.parent
-# 
-#     elif current.parent is previous: # Moving down
-#         previous = current
-#         if viewpoint[current.split.dim] <= current.split.pos:
-#             if current.right is not None:
-#                 current = current.right
-#             else:
-#                 previous = current.right
-#         else:
-#             if current.left is not None:
-#                 current = current.left
-#             else:
-#                 previous = current.left
-# 
-#     elif current.right is previous: # Moving up from right 
-#         previous = current
-#         if viewpoint[current.split.dim] <= current.split.pos:
-#             if current.left is not None:
-#                 current = current.left
-#             else:
-#                 current = current.parent
-#         else:
-#             current = current.parent
-# 
-#     elif current.left is previous: # Moving up from left child
-#         previous = current
-#         if viewpoint[current.split.dim] > current.split.pos:
-#             if current.right is not None:
-#                 current = current.right
-#             else:
-#                 current = current.parent
-#         else:
-#             current = current.parent
-# 
-#     return current, previous
 
 
 def receive_and_reduce(comm, incoming_rank, image, add_to_front):
-    mylog.debug( 'Receiving image from %04i' % incoming_rank)
+    mylog.debug('Receiving image from %04i' % incoming_rank)
     #mylog.debug( '%04i receiving image from %04i'%(self.comm.rank,back.owner))
     arr2 = comm.recv_array(incoming_rank, incoming_rank).reshape(
         (image.shape[0], image.shape[1], image.shape[2]))
@@ -418,36 +45,24 @@
         np.add(image, front, image)
         return image
 
-    ta = 1.0 - front[:,:,3]
+    ta = 1.0 - front[:, :, 3]
     np.maximum(ta, 0.0, ta)
     # This now does the following calculation, but in a memory
     # conservative fashion
     # image[:,:,i  ] = front[:,:,i] + ta*back[:,:,i]
     image = back.copy()
     for i in range(4):
-        np.multiply(image[:,:,i], ta, image[:,:,i])
+        np.multiply(image[:, :, i], ta, image[:, :, i])
     np.add(image, front, image)
     return image
 
+
 def send_to_parent(comm, outgoing_rank, image):
-    mylog.debug( 'Sending image to %04i' % outgoing_rank)
+    mylog.debug('Sending image to %04i' % outgoing_rank)
     comm.send_array(image, outgoing_rank, tag=comm.rank)
 
+
 def scatter_image(comm, root, image):
-    mylog.debug( 'Scattering from %04i' % root)
+    mylog.debug('Scattering from %04i' % root)
     image = comm.mpi_bcast(image, root=root)
     return image
-
-def find_node(node, pos):
-    """
-    Find the AMRKDTree node enclosing a position
-    """
-    assert(np.all(node.left_edge <= pos))
-    assert(np.all(node.right_edge > pos))
-    while not kd_is_leaf(node):
-        if pos[node.split.dim] < node.split.pos:
-            node = node.left
-        else:
-            node = node.right
-    return node
-


https://bitbucket.org/yt_analysis/yt/commits/e6a87c284bc7/
Changeset:   e6a87c284bc7
Branch:      yt
User:        samskillman
Date:        2013-06-05 23:47:29
Summary:     So I had broken parallel rendering. Now it is unbroken. Added helper functions
to get split information since it is a struct now.
Affected #:  2 files

diff -r 1833903a59d29bddfdb79af67c209763db45221e -r e6a87c284bc75ac8440712b88c2623602ff09527 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -27,9 +27,9 @@
 import numpy as np
 import h5py
 from amr_kdtools import \
-        receive_and_reduce, send_to_parent, scatter_image, find_node, \
-        depth_first_touch
-from yt.utilities.lib.amr_kdtools import Node, add_pygrids, \
+        receive_and_reduce, send_to_parent, scatter_image
+
+from yt.utilities.lib.amr_kdtools import Node, add_pygrids, find_node, \
         kd_is_leaf, depth_traverse, viewpoint_traverse, kd_traverse, \
         get_left_edge, get_right_edge, kd_sum_volume, kd_node_check
 from yt.utilities.parallel_tools.parallel_analysis_interface \
@@ -233,13 +233,13 @@
         owners = {}
         for bottom_id in range(self.comm.size, 2*self.comm.size):
             temp = self.get_node(bottom_id)
-            owners[temp.id] = temp.id - self.comm.size
+            owners[temp.node_id] = temp.node_id - self.comm.size
             while temp is not None:
                 if temp.parent is None: break
                 if temp == temp.parent.right:
                     break
                 temp = temp.parent
-                owners[temp.id] = owners[temp.left.id]
+                owners[temp.node_id] = owners[temp.left.node_id]
         return owners
 
     def reduce_tree_images(self, image, viewpoint):
@@ -250,17 +250,18 @@
         node = self.get_node(nprocs + myrank)
 
         while True:
-            if owners[node.parent.id] == myrank:
-                split = node.parent.split
-                left_in_front = viewpoint[split.dim] < node.parent.split.pos
+            if owners[node.parent.node_id] == myrank:
+                split_dim = node.parent.get_split_dim()
+                split_pos = node.parent.get_split_pos()
+                left_in_front = viewpoint[split_dim] < split_pos
                 #add_to_front = (left_in_front == (node == node.parent.right))
                 add_to_front = not left_in_front
-                image = receive_and_reduce(self.comm, owners[node.parent.right.id],
+                image = receive_and_reduce(self.comm, owners[node.parent.right.node_id],
                                   image, add_to_front)
-                if node.parent.id == 1: break
+                if node.parent.node_id == 1: break
                 else: node = node.parent
             else:
-                send_to_parent(self.comm, owners[node.parent.id], image)
+                send_to_parent(self.comm, owners[node.parent.node_id], image)
                 break
         image = scatter_image(self.comm, owners[1], image)
         return image
@@ -407,7 +408,7 @@
             self.comm.recv_array(self.comm.rank-1, tag=self.comm.rank-1)
         f = h5py.File(fn,'w')
         for node in depth_traverse(self.tree):
-            i = node.id
+            i = node.node_id
             if node.data is not None:
                 for fi,field in enumerate(self.fields):
                     try:
@@ -428,7 +429,7 @@
         try:
             f = h5py.File(fn,"a")
             for node in depth_traverse(self.tree):
-                i = node.id
+                i = node.node_id
                 if node.grid != -1:
                     data = [f["brick_%s_%s" %
                               (hex(i), field)][:].astype('float64') for field in self.fields]
@@ -479,31 +480,27 @@
         splitdims = []
         splitposs = []
         for node in depth_first_touch(self.tree):
-            nids.append(node.id) 
+            nids.append(node.node_id) 
             les.append(node.left_edge) 
             res.append(node.right_edge) 
             if node.left is None:
                 leftids.append(-1) 
             else:
-                leftids.append(node.left.id) 
+                leftids.append(node.left.node_id) 
             if node.right is None:
                 rightids.append(-1) 
             else:
-                rightids.append(node.right.id) 
+                rightids.append(node.right.node_id) 
             if node.parent is None:
                 parentids.append(-1) 
             else:
-                parentids.append(node.parent.id) 
+                parentids.append(node.parent.node_id) 
             if node.grid is None:
                 gridids.append(-1) 
             else:
                 gridids.append(node.grid) 
-            if node.split is None:
-                splitdims.append(-1)
-                splitposs.append(np.nan)
-            else:
-                splitdims.append(node.split.dim)
-                splitposs.append(node.split.pos)
+            splitdims.append(node.get_split_dim())
+            splitposs.append(node.get_split_pos())
 
         return nids, parentids, leftids, rightids, les, res, gridids,\
                 splitdims, splitposs
@@ -532,7 +529,7 @@
                 n.grid = gids[i]
 
             if splitdims[i] != -1:
-                n.split = Split(splitdims[i], splitposs[i])
+                n.create_split(splitdims[i], splitposs[i])
 
         mylog.info('AMRKDTree rebuilt, Final Volume: %e' % kd_sum_volume(self.tree.trunk))
         return self.tree.trunk

diff -r 1833903a59d29bddfdb79af67c209763db45221e -r e6a87c284bc75ac8440712b88c2623602ff09527 yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -1,14 +1,33 @@
+"""
+AMR kD-Tree Cython Tools
+
+Authors: Samuel Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
+
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2013 Samuel Skillman.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
 import numpy as np
 cimport numpy as np
 cimport cython
-from libc.stdlib cimport malloc, free, abs
-from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip, i64clip
-from field_interpolation_tables cimport \
-    FieldInterpolationTable, FIT_initialize_table, FIT_eval_transfer,\
-    FIT_eval_transfer_with_light
-from fixed_interpolator cimport *
-
-from cython.parallel import prange, parallel, threadid
+from libc.stdlib cimport malloc, free
 
 cdef extern from "stdlib.h":
     # NOTE that size_t might not be int
@@ -30,7 +49,7 @@
     cdef readonly Node right
     cdef readonly Node parent
     cdef readonly int grid
-    cdef readonly int node_id
+    cdef readonly long node_id
     cdef np.float64_t left_edge[3]
     cdef np.float64_t right_edge[3]
     cdef public data
@@ -43,7 +62,7 @@
                   np.ndarray[np.float64_t, ndim=1] left_edge,
                   np.ndarray[np.float64_t, ndim=1] right_edge,
                   int grid,
-                  int node_id):
+                  long node_id):
         self.left = left
         self.right = right
         self.parent = parent
@@ -62,6 +81,23 @@
                                    self.right_edge[2])
         print '\t grid: %i' % self.grid
 
+    def get_split_dim(self):
+        try: 
+            return self.split.dim
+        except:
+            return -1
+    
+    def get_split_pos(self):
+        try: 
+            return self.split.pos
+        except:
+            return np.nan
+
+    def create_split(self, dim, pos):
+        split = <Split *> malloc(sizeof(Split))
+        split.dim = dim 
+        split.pos = pos
+        self.split = split
 
 def get_left_edge(Node node):
     le = np.empty(3, dtype='float64')
@@ -78,19 +114,19 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-def _lchild_id(int node_id):
+cdef long _lchild_id(long node_id):
     return (node_id<<1)
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-def _rchild_id(int node_id):
+cdef long _rchild_id(long node_id):
     return (node_id<<1) + 1
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-def _parent_id(int node_id):
+cdef long _parent_id(long node_id):
     return (node_id-1) >> 1
 
 @cython.boundscheck(False)
@@ -174,9 +210,10 @@
         return
 
     # If we should continue to split based on parallelism, do so!
-    # if should_i_split(node, rank, size):
-    #     geo_split(node, gle, gre, grid_id, rank, size)
-    #     return
+    if should_i_split(node, rank, size):
+        geo_split(node, gle, gre, grid_id, rank, size)
+        return
+
     cdef int contained = 1
     for i in range(3):
         if gle[i] > node.left_edge[i] or\
@@ -325,9 +362,9 @@
 
     if ngrids == 1:
         # If we should continue to split based on parallelism, do so!
-        #if should_i_split(node, rank, size):
-        #    geo_split(node, gles, gres, grid_ids, rank, size)
-        #    return
+        if should_i_split(node, rank, size):
+            geo_split(node, gles[0], gres[0], gids[0], rank, size)
+            return
 
         for i in range(3):
             contained *= gles[0][i] <= node.left_edge[i]
@@ -384,8 +421,6 @@
     split.dim = best_dim
     split.pos = split_pos
 
-    #del data
-
     # Create a Split
     divide(node, split)
 
@@ -570,57 +605,54 @@
 
     return 0
 
-# def geo_split_grid(node, gle, gre, grid_id, rank, size):
-#     big_dim = np.argmax(gre-gle)
-#     new_pos = (gre[big_dim] + gle[big_dim])/2.
-#     old_gre = gre.copy()
-#     new_gle = gle.copy()
-#     new_gle[big_dim] = new_pos
-#     gre[big_dim] = new_pos
-# 
-#     split = Split(big_dim, new_pos)
-# 
-#     # Create a Split
-#     divide(node, split)
-# 
-#     # Populate Left Node
-#     #print 'Inserting left node', node.left_edge, node.right_edge
-#     insert_grid(node.left, gle, gre,
-#                 grid_id, rank, size)
-# 
-#     # Populate Right Node
-#     #print 'Inserting right node', node.left_edge, node.right_edge
-#     insert_grid(node.right, new_gle, old_gre,
-#                 grid_id, rank, size)
-#     return
-# 
-# 
-# def geo_split(node, gles, gres, grid_ids, rank, size):
-#     big_dim = np.argmax(gres[0]-gles[0])
-#     new_pos = (gres[0][big_dim] + gles[0][big_dim])/2.
-#     old_gre = gres[0].copy()
-#     new_gle = gles[0].copy()
-#     new_gle[big_dim] = new_pos
-#     gres[0][big_dim] = new_pos
-#     gles = np.append(gles, np.array([new_gle]), axis=0)
-#     gres = np.append(gres, np.array([old_gre]), axis=0)
-#     grid_ids = np.append(grid_ids, grid_ids, axis=0)
-# 
-#     split = Split(big_dim, new_pos)
-# 
-#     # Create a Split
-#     divide(node, split)
-# 
-#     # Populate Left Node
-#     #print 'Inserting left node', node.left_edge, node.right_edge
-#     insert_grids(node.left, gles[:1], gres[:1],
-#             grid_ids[:1], rank, size)
-# 
-#     # Populate Right Node
-#     #print 'Inserting right node', node.left_edge, node.right_edge
-#     insert_grids(node.right, gles[1:], gres[1:],
-#             grid_ids[1:], rank, size)
-#     return
+cdef geo_split(Node node, 
+               np.float64_t *gle, 
+               np.float64_t *gre, 
+               int grid_id, 
+               int rank, 
+               int size):
+    cdef int big_dim = 0
+    cdef int i
+    cdef np.float64_t v, my_max = 0.0
+    
+    for i in range(3):
+        v = gre[i] - gle[i]
+        if v > my_max:
+            my_max = v
+            big_dim = i
+
+    new_pos = (gre[big_dim] + gle[big_dim])/2.
+    
+    lnew_gle = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+    lnew_gre = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+    rnew_gle = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+    rnew_gre = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+
+    for j in range(3):
+        lnew_gle[j] = gle[j]
+        lnew_gre[j] = gre[j]
+        rnew_gle[j] = gle[j]
+        rnew_gre[j] = gre[j]
+
+    split = <Split *> malloc(sizeof(Split))
+    split.dim = big_dim 
+    split.pos = new_pos
+
+    # Create a Split
+    divide(node, split)
+
+    #lnew_gre[big_dim] = new_pos
+    # Populate Left Node
+    #print 'Inserting left node', node.left_edge, node.right_edge
+    insert_grid(node.left, lnew_gle, lnew_gre, 
+            grid_id, rank, size)
+
+    #rnew_gle[big_dim] = new_pos 
+    # Populate Right Node
+    #print 'Inserting right node', node.left_edge, node.right_edge
+    insert_grid(node.right, rnew_gle, rnew_gre,
+            grid_id, rank, size)
+    return
 
 cdef new_right(Node node, Split * split):
     new_right = Node.right_edge.copy()


https://bitbucket.org/yt_analysis/yt/commits/97dd5cf35207/
Changeset:   97dd5cf35207
Branch:      yt
User:        samskillman
Date:        2013-06-05 23:56:09
Summary:     Few more cleanup items. Now streamlines pass again.
Affected #:  4 files

diff -r e6a87c284bc75ac8440712b88c2623602ff09527 -r 97dd5cf35207eb4420277f9000a0a01165156048 yt/data_objects/tests/test_streamlines.py
--- a/yt/data_objects/tests/test_streamlines.py
+++ b/yt/data_objects/tests/test_streamlines.py
@@ -13,10 +13,14 @@
     cs = np.array([a.ravel() for a in cs]).T
     length = (1.0/128) * 16 # 16 half-widths of a cell
     for nprocs in [1, 2, 4, 8]:
+        print nprocs
         pf = fake_random_pf(64, nprocs = nprocs, fields = _fields)
         streams = Streamlines(pf, cs, length=length)
         streams.integrate_through_volume()
+        print 'I did it.'
         for path in (streams.path(i) for i in range(8)):
             yield assert_rel_equal, path['dts'].sum(), 1.0, 14
             yield assert_equal, np.all(path['t'] <= (1.0 + 1e-10)), True
+            print path['dts'].sum()
+            print np.all(path['t'] <= (1.0 + 1e-10))
             path["Density"]

diff -r e6a87c284bc75ac8440712b88c2623602ff09527 -r 97dd5cf35207eb4420277f9000a0a01165156048 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -30,7 +30,8 @@
         receive_and_reduce, send_to_parent, scatter_image
 
 from yt.utilities.lib.amr_kdtools import Node, add_pygrids, find_node, \
-        kd_is_leaf, depth_traverse, viewpoint_traverse, kd_traverse, \
+        kd_is_leaf, depth_traverse, depth_first_touch, viewpoint_traverse, \
+        kd_traverse, \
         get_left_edge, get_right_edge, kd_sum_volume, kd_node_check
 from yt.utilities.parallel_tools.parallel_analysis_interface \
     import ParallelAnalysisInterface 
@@ -479,10 +480,10 @@
         gridids = []
         splitdims = []
         splitposs = []
-        for node in depth_first_touch(self.tree):
+        for node in depth_first_touch(self.tree.trunk):
             nids.append(node.node_id) 
-            les.append(node.left_edge) 
-            res.append(node.right_edge) 
+            les.append(node.get_left_edge()) 
+            res.append(node.get_right_edge()) 
             if node.left is None:
                 leftids.append(-1) 
             else:
@@ -517,14 +518,18 @@
         N = nids.shape[0]
         for i in xrange(N):
             n = self.get_node(nids[i])
-            n.left_edge = les[i]
-            n.right_edge = res[i]
+            n.set_left_edge(les[i])
+            n.set_right_edge(res[i])
             if lids[i] != -1 and n.left is None:
-                n.left = Node(n, None, None, None,  
-                                      None, None, lids[i])
+                n.left = Node(n, None, None, 
+                              np.zeros(3, dtype='float64'),  
+                              np.zeros(3, dtype='float64'),  
+                              -1, lids[i])
             if rids[i] != -1 and n.right is None:
-                n.right = Node(n, None, None, None, 
-                                      None, None, rids[i])
+                n.right = Node(n, None, None, 
+                               np.zeros(3, dtype='float64'),  
+                               np.zeros(3, dtype='float64'),  
+                               -1, rids[i])
             if gids[i] != -1:
                 n.grid = gids[i]
 

diff -r e6a87c284bc75ac8440712b88c2623602ff09527 -r 97dd5cf35207eb4420277f9000a0a01165156048 yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -45,11 +45,11 @@
 @cython.cdivision(True)
 cdef class Node:
 
-    cdef readonly Node left
-    cdef readonly Node right
-    cdef readonly Node parent
-    cdef readonly int grid
-    cdef readonly long node_id
+    cdef public Node left
+    cdef public Node right
+    cdef public Node parent
+    cdef public int grid
+    cdef public long node_id
     cdef np.float64_t left_edge[3]
     cdef np.float64_t right_edge[3]
     cdef public data
@@ -72,6 +72,7 @@
             self.right_edge[i] = right_edge[i]
         self.grid = grid
         self.node_id = node_id
+        self.split == NULL
 
     def print_me(self):
         print 'Node %i' % self.node_id
@@ -82,12 +83,34 @@
         print '\t grid: %i' % self.grid
 
     def get_split_dim(self):
-        try: 
+        if self.split != NULL:
             return self.split.dim
-        except:
+        else:
             return -1
     
     def get_split_pos(self):
+        if self.split != NULL:
+            return self.split.pos
+        else:
+            return np.nan
+
+    def get_left_edge(self):
+        return get_left_edge(self)
+    
+    def get_right_edge(self):
+        return get_right_edge(self)
+
+    def set_left_edge(self, np.ndarray[np.float64_t, ndim=1] left_edge):
+        cdef int i
+        for i in range(3):
+            self.left_edge[i] = left_edge[i]
+    
+    def set_right_edge(self, np.ndarray[np.float64_t, ndim=1] right_edge):
+        cdef int i
+        for i in range(3):
+            self.right_edge[i] = right_edge[i]
+    
+    def get_split_pos(self):
         try: 
             return self.split.pos
         except:
@@ -654,16 +677,6 @@
             grid_id, rank, size)
     return
 
-cdef new_right(Node node, Split * split):
-    new_right = Node.right_edge.copy()
-    new_right[split.dim] = split.pos
-    return new_right
-
-cdef new_left(Node node, Split * split):
-    new_left = Node.left_edge.copy()
-    new_left[split.dim] = split.pos
-    return new_left
-
 cdef void divide(Node node, Split * split):
     # Create a Split
     node.split = split
@@ -762,12 +775,12 @@
             current = current.parent
             previous = current.right
 
-def depth_first_touch(tree, max_node=None):
+def depth_first_touch(Node tree, max_node=None):
     '''
     Yields a depth-first traversal of the kd tree always going to
     the left child before the right.
     '''
-    current = tree.trunk
+    current = tree
     previous = None
     if max_node is None:
         max_node = np.inf
@@ -776,23 +789,23 @@
             yield current
         current, previous = step_depth(current, previous)
         if current is None: break
-        if current.id >= max_node:
+        if current.node_id >= max_node:
             current = current.parent
             previous = current.right
 
-def breadth_traverse(tree):
+def breadth_traverse(Node tree):
     '''
     Yields a breadth-first traversal of the kd tree always going to
     the left child before the right.
     '''
-    current = tree.trunk
+    current = tree
     previous = None
     while current is not None:
         yield current
         current, previous = step_depth(current, previous)
 
 
-def viewpoint_traverse(tree, viewpoint):
+def viewpoint_traverse(Node tree, viewpoint):
     '''
     Yields a viewpoint dependent traversal of the kd-tree.  Starts
     with nodes furthest away from viewpoint.

diff -r e6a87c284bc75ac8440712b88c2623602ff09527 -r 97dd5cf35207eb4420277f9000a0a01165156048 yt/visualization/streamlines.py
--- a/yt/visualization/streamlines.py
+++ b/yt/visualization/streamlines.py
@@ -169,8 +169,8 @@
                    np.any(stream[-step+1,:] >= self.pf.domain_right_edge):
                 return 0
 
-            if np.any(stream[-step+1,:] < node.left_edge) | \
-                   np.any(stream[-step+1,:] >= node.right_edge):
+            if np.any(stream[-step+1,:] < node.get_left_edge()) | \
+                   np.any(stream[-step+1,:] >= node.get_right_edge()):
                 return step-1
             step -= 1
         return step


https://bitbucket.org/yt_analysis/yt/commits/de67793f831e/
Changeset:   de67793f831e
Branch:      yt
User:        samskillman
Date:        2013-06-06 18:22:35
Summary:     Addressing a few comments. Removed print statements. Changed image reduction to
a while loop with an else, which is pretty sweet. Removed an import * in __init__.
Affected #:  3 files

diff -r 97dd5cf35207eb4420277f9000a0a01165156048 -r de67793f831e5d77f48b6a0730c3939fd03ec94c yt/data_objects/tests/test_streamlines.py
--- a/yt/data_objects/tests/test_streamlines.py
+++ b/yt/data_objects/tests/test_streamlines.py
@@ -13,14 +13,10 @@
     cs = np.array([a.ravel() for a in cs]).T
     length = (1.0/128) * 16 # 16 half-widths of a cell
     for nprocs in [1, 2, 4, 8]:
-        print nprocs
         pf = fake_random_pf(64, nprocs = nprocs, fields = _fields)
         streams = Streamlines(pf, cs, length=length)
         streams.integrate_through_volume()
-        print 'I did it.'
         for path in (streams.path(i) for i in range(8)):
             yield assert_rel_equal, path['dts'].sum(), 1.0, 14
             yield assert_equal, np.all(path['t'] <= (1.0 + 1e-10)), True
-            print path['dts'].sum()
-            print np.all(path['t'] <= (1.0 + 1e-10))
             path["Density"]

diff -r 97dd5cf35207eb4420277f9000a0a01165156048 -r de67793f831e5d77f48b6a0730c3939fd03ec94c yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -250,20 +250,19 @@
         owners = self.get_reduce_owners()
         node = self.get_node(nprocs + myrank)
 
-        while True:
-            if owners[node.parent.node_id] == myrank:
-                split_dim = node.parent.get_split_dim()
-                split_pos = node.parent.get_split_pos()
-                left_in_front = viewpoint[split_dim] < split_pos
-                #add_to_front = (left_in_front == (node == node.parent.right))
-                add_to_front = not left_in_front
-                image = receive_and_reduce(self.comm, owners[node.parent.right.node_id],
-                                  image, add_to_front)
-                if node.parent.node_id == 1: break
-                else: node = node.parent
-            else:
-                send_to_parent(self.comm, owners[node.parent.node_id], image)
-                break
+        while owners[node.parent.node_id] == myrank:
+            split_dim = node.parent.get_split_dim()
+            split_pos = node.parent.get_split_pos()
+            left_in_front = viewpoint[split_dim] < split_pos
+            #add_to_front = (left_in_front == (node == node.parent.right))
+            add_to_front = not left_in_front
+            image = receive_and_reduce(self.comm, owners[node.parent.right.node_id],
+                              image, add_to_front)
+            if node.parent.node_id == 1: break
+            else: node = node.parent
+        else:
+            send_to_parent(self.comm, owners[node.parent.node_id], image)
+
         image = scatter_image(self.comm, owners[1], image)
         return image
 

diff -r 97dd5cf35207eb4420277f9000a0a01165156048 -r de67793f831e5d77f48b6a0730c3939fd03ec94c yt/utilities/lib/__init__.py
--- a/yt/utilities/lib/__init__.py
+++ b/yt/utilities/lib/__init__.py
@@ -40,4 +40,3 @@
 from .marching_cubes import *
 from .GridTree import *
 from .write_array import *
-from .amr_kdtools import *


https://bitbucket.org/yt_analysis/yt/commits/e6acf09c0c9f/
Changeset:   e6acf09c0c9f
Branch:      yt
User:        samskillman
Date:        2013-06-06 19:04:35
Summary:     Simplifying some of the reduction logic, fixing parallel perspective renders.
The scattered image was not being returned from the finalize correctly.
Affected #:  2 files

diff -r de67793f831e5d77f48b6a0730c3939fd03ec94c -r e6acf09c0c9f9b46153862a05874f2a5b977ede5 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -253,18 +253,16 @@
         while owners[node.parent.node_id] == myrank:
             split_dim = node.parent.get_split_dim()
             split_pos = node.parent.get_split_pos()
-            left_in_front = viewpoint[split_dim] < split_pos
-            #add_to_front = (left_in_front == (node == node.parent.right))
-            add_to_front = not left_in_front
-            image = receive_and_reduce(self.comm, owners[node.parent.right.node_id],
-                              image, add_to_front)
+            add_to_front = viewpoint[split_dim] >= split_pos
+            image = receive_and_reduce(self.comm,
+                                       owners[node.parent.right.node_id],
+                                       image, add_to_front)
             if node.parent.node_id == 1: break
             else: node = node.parent
         else:
             send_to_parent(self.comm, owners[node.parent.node_id], image)
 
-        image = scatter_image(self.comm, owners[1], image)
-        return image
+        return scatter_image(self.comm, owners[1], image)
 
     def get_brick_data(self, node):
         if node.data is not None: return node.data

diff -r de67793f831e5d77f48b6a0730c3939fd03ec94c -r e6acf09c0c9f9b46153862a05874f2a5b977ede5 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -1074,19 +1074,22 @@
                     if np.any(np.isnan(data)):
                         raise RuntimeError
 
-        view_pos = self.front_center
         for brick in self.volume.traverse(self.front_center):
             sampler(brick, num_threads=num_threads)
             total_cells += np.prod(brick.my_data[0].shape)
             pbar.update(total_cells)
 
         pbar.finish()
-        image = sampler.aimage
-        self.finalize_image(image)
+        image = self.finalize_image(sampler.aimage)
         return image
 
     def finalize_image(self, image):
+        view_pos = self.front_center
         image.shape = self.resolution[0], self.resolution[0], 4
+        image = self.volume.reduce_tree_images(image, view_pos)
+        if self.transfer_function.grey_opacity is False:
+            image[:,:,3]=1.0
+        return image
 
 def corners(left_edge, right_edge):
     return np.array([


https://bitbucket.org/yt_analysis/yt/commits/ea98c7a011e2/
Changeset:   ea98c7a011e2
Branch:      yt
User:        samskillman
Date:        2013-06-06 20:51:24
Summary:     long -> np.int64_t, malloc -> alloca. Now has the memory of a fish.
Affected #:  1 file

diff -r e6acf09c0c9f9b46153862a05874f2a5b977ede5 -r ea98c7a011e2eaa8d7943987c51c727f7025f6b0 yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -49,7 +49,7 @@
     cdef public Node right
     cdef public Node parent
     cdef public int grid
-    cdef public long node_id
+    cdef public np.int64_t node_id
     cdef np.float64_t left_edge[3]
     cdef np.float64_t right_edge[3]
     cdef public data
@@ -62,7 +62,7 @@
                   np.ndarray[np.float64_t, ndim=1] left_edge,
                   np.ndarray[np.float64_t, ndim=1] right_edge,
                   int grid,
-                  long node_id):
+                  np.int64_t node_id):
         self.left = left
         self.right = right
         self.parent = parent
@@ -109,12 +109,6 @@
         cdef int i
         for i in range(3):
             self.right_edge[i] = right_edge[i]
-    
-    def get_split_pos(self):
-        try: 
-            return self.split.pos
-        except:
-            return np.nan
 
     def create_split(self, dim, pos):
         split = <Split *> malloc(sizeof(Split))
@@ -122,6 +116,9 @@
         split.pos = pos
         self.split = split
 
+    def __dealloc__(self):
+        if self.split != NULL: free(self.split)
+
 def get_left_edge(Node node):
     le = np.empty(3, dtype='float64')
     for i in range(3):
@@ -137,19 +134,19 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-cdef long _lchild_id(long node_id):
+cdef inline np.int64_t _lchild_id(np.int64_t node_id):
     return (node_id<<1)
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-cdef long _rchild_id(long node_id):
+cdef inline np.int64_t _rchild_id(np.int64_t node_id):
     return (node_id<<1) + 1
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-cdef long _parent_id(long node_id):
+cdef inline np.int64_t _parent_id(np.int64_t node_id):
     return (node_id-1) >> 1
 
 @cython.boundscheck(False)
@@ -211,8 +208,8 @@
     The entire purpose of this function is to move everything from ndarrays
     to internal C pointers. 
     """
-    pgles = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-    pgres = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+    pgles = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+    pgres = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
     cdef int j
     for j in range(3):
         pgles[j] = gle[j]
@@ -270,12 +267,12 @@
     The entire purpose of this function is to move everything from ndarrays
     to internal C pointers. 
     """
-    pgles = <np.float64_t **> malloc(ngrids * sizeof(np.float64_t*))
-    pgres = <np.float64_t **> malloc(ngrids * sizeof(np.float64_t*))
-    pgids = <np.int64_t *> malloc(ngrids * sizeof(np.int64_t))
+    pgles = <np.float64_t **> alloca(ngrids * sizeof(np.float64_t*))
+    pgres = <np.float64_t **> alloca(ngrids * sizeof(np.float64_t*))
+    pgids = <np.int64_t *> alloca(ngrids * sizeof(np.int64_t))
     for i in range(ngrids):
-        pgles[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-        pgres[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        pgles[i] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+        pgres[i] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
         pgids[i] = gids[i]
         for j in range(3):
             pgles[i][j] = gles[i, j]
@@ -304,8 +301,8 @@
         insert_grids(node, ngrids, gles, gres, gids, rank, size)
         return
 
-    less_ids= <np.int64_t *> malloc(ngrids * sizeof(np.int64_t))
-    greater_ids = <np.int64_t *> malloc(ngrids * sizeof(np.int64_t))
+    less_ids= <np.int64_t *> alloca(ngrids * sizeof(np.int64_t))
+    greater_ids = <np.int64_t *> alloca(ngrids * sizeof(np.int64_t))
    
     nless = 0
     ngreater = 0
@@ -321,19 +318,19 @@
     #print 'nless: %i' % nless
     #print 'ngreater: %i' % ngreater
 
-    less_gles = <np.float64_t **> malloc(nless * sizeof(np.float64_t*))
-    less_gres = <np.float64_t **> malloc(nless * sizeof(np.float64_t*))
-    l_ids = <np.int64_t *> malloc(nless * sizeof(np.int64_t))
+    less_gles = <np.float64_t **> alloca(nless * sizeof(np.float64_t*))
+    less_gres = <np.float64_t **> alloca(nless * sizeof(np.float64_t*))
+    l_ids = <np.int64_t *> alloca(nless * sizeof(np.int64_t))
     for i in range(nless):
-        less_gles[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-        less_gres[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        less_gles[i] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+        less_gres[i] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
 
-    greater_gles = <np.float64_t **> malloc(ngreater * sizeof(np.float64_t*))
-    greater_gres = <np.float64_t **> malloc(ngreater * sizeof(np.float64_t*))
-    g_ids = <np.int64_t *> malloc(ngreater * sizeof(np.int64_t))
+    greater_gles = <np.float64_t **> alloca(ngreater * sizeof(np.float64_t*))
+    greater_gres = <np.float64_t **> alloca(ngreater * sizeof(np.float64_t*))
+    g_ids = <np.int64_t *> alloca(ngreater * sizeof(np.int64_t))
     for i in range(ngreater):
-        greater_gles[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-        greater_gres[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        greater_gles[i] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+        greater_gres[i] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
 
     cdef int index
     for i in range(nless):
@@ -418,16 +415,16 @@
                int size):
 
     cdef int j
-    data = <np.float64_t ***> malloc(sizeof(np.float64_t**))
-    data[0] = <np.float64_t **> malloc(2 * sizeof(np.float64_t*))
+    data = <np.float64_t ***> alloca(sizeof(np.float64_t**))
+    data[0] = <np.float64_t **> alloca(2 * sizeof(np.float64_t*))
     for j in range(2):
         data[0][j] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
     for j in range(3):
         data[0][0][j] = gle[j]
         data[0][1][j] = gre[j]
 
-    less_ids = <np.uint8_t *> malloc(1 * sizeof(np.uint8_t))
-    greater_ids = <np.uint8_t *> malloc(1 * sizeof(np.uint8_t))
+    less_ids = <np.uint8_t *> alloca(1 * sizeof(np.uint8_t))
+    greater_ids = <np.uint8_t *> alloca(1 * sizeof(np.uint8_t))
 
     best_dim, split_pos, nless, ngreater = \
         kdtree_get_choices(1, data, node.left_edge, node.right_edge,
@@ -540,17 +537,17 @@
     # Find a Split
     cdef int i, j, k
 
-    data = <np.float64_t ***> malloc(ngrids * sizeof(np.float64_t**))
+    data = <np.float64_t ***> alloca(ngrids * sizeof(np.float64_t**))
     for i in range(ngrids):
-        data[i] = <np.float64_t **> malloc(2 * sizeof(np.float64_t*))
+        data[i] = <np.float64_t **> alloca(2 * sizeof(np.float64_t*))
         for j in range(2):
             data[i][j] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
         for j in range(3):
             data[i][0][j] = gles[i][j]
             data[i][1][j] = gres[i][j]
 
-    less_ids = <np.uint8_t *> malloc(ngrids * sizeof(np.uint8_t))
-    greater_ids = <np.uint8_t *> malloc(ngrids * sizeof(np.uint8_t))
+    less_ids = <np.uint8_t *> alloca(ngrids * sizeof(np.uint8_t))
+    greater_ids = <np.uint8_t *> alloca(ngrids * sizeof(np.uint8_t))
 
     best_dim, split_pos, nless, ngreater = \
         kdtree_get_choices(ngrids, data, node.left_edge, node.right_edge,
@@ -571,8 +568,8 @@
     # Create a Split
     divide(node, split)
 
-    less_index = <np.int64_t *> malloc(ngrids * sizeof(np.int64_t))
-    greater_index = <np.int64_t *> malloc(ngrids * sizeof(np.int64_t))
+    less_index = <np.int64_t *> alloca(ngrids * sizeof(np.int64_t))
+    greater_index = <np.int64_t *> alloca(ngrids * sizeof(np.int64_t))
    
     nless = 0
     ngreater = 0
@@ -585,19 +582,19 @@
             greater_index[ngreater] = i
             ngreater += 1
 
-    less_gles = <np.float64_t **> malloc(nless * sizeof(np.float64_t*))
-    less_gres = <np.float64_t **> malloc(nless * sizeof(np.float64_t*))
-    l_ids = <np.int64_t *> malloc(nless * sizeof(np.int64_t))
+    less_gles = <np.float64_t **> alloca(nless * sizeof(np.float64_t*))
+    less_gres = <np.float64_t **> alloca(nless * sizeof(np.float64_t*))
+    l_ids = <np.int64_t *> alloca(nless * sizeof(np.int64_t))
     for i in range(nless):
-        less_gles[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-        less_gres[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        less_gles[i] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+        less_gres[i] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
 
-    greater_gles = <np.float64_t **> malloc(ngreater * sizeof(np.float64_t*))
-    greater_gres = <np.float64_t **> malloc(ngreater * sizeof(np.float64_t*))
-    g_ids = <np.int64_t *> malloc(ngreater * sizeof(np.int64_t))
+    greater_gles = <np.float64_t **> alloca(ngreater * sizeof(np.float64_t*))
+    greater_gres = <np.float64_t **> alloca(ngreater * sizeof(np.float64_t*))
+    g_ids = <np.int64_t *> alloca(ngreater * sizeof(np.int64_t))
     for i in range(ngreater):
-        greater_gles[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-        greater_gres[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        greater_gles[i] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+        greater_gres[i] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
 
     cdef int index
     for i in range(nless):
@@ -646,10 +643,10 @@
 
     new_pos = (gre[big_dim] + gle[big_dim])/2.
     
-    lnew_gle = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-    lnew_gre = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-    rnew_gle = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-    rnew_gre = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+    lnew_gle = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+    lnew_gre = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+    rnew_gle = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+    rnew_gre = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
 
     for j in range(3):
         lnew_gle[j] = gle[j]


https://bitbucket.org/yt_analysis/yt/commits/1ae43f835321/
Changeset:   1ae43f835321
Branch:      yt
User:        samskillman
Date:        2013-06-06 22:08:32
Summary:     alloca was actually not working as I expected, so I reverted to malloc. Also
fixing should_i_split for a corner case when the node_id is > 2^64. Not a great
fix.
Affected #:  1 file

diff -r ea98c7a011e2eaa8d7943987c51c727f7025f6b0 -r 1ae43f8353213e5e3120293feab9ded8e084c594 yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -301,8 +301,8 @@
         insert_grids(node, ngrids, gles, gres, gids, rank, size)
         return
 
-    less_ids= <np.int64_t *> alloca(ngrids * sizeof(np.int64_t))
-    greater_ids = <np.int64_t *> alloca(ngrids * sizeof(np.int64_t))
+    less_ids= <np.int64_t *> malloc(ngrids * sizeof(np.int64_t))
+    greater_ids = <np.int64_t *> malloc(ngrids * sizeof(np.int64_t))
    
     nless = 0
     ngreater = 0
@@ -318,19 +318,19 @@
     #print 'nless: %i' % nless
     #print 'ngreater: %i' % ngreater
 
-    less_gles = <np.float64_t **> alloca(nless * sizeof(np.float64_t*))
-    less_gres = <np.float64_t **> alloca(nless * sizeof(np.float64_t*))
-    l_ids = <np.int64_t *> alloca(nless * sizeof(np.int64_t))
+    less_gles = <np.float64_t **> malloc(nless * sizeof(np.float64_t*))
+    less_gres = <np.float64_t **> malloc(nless * sizeof(np.float64_t*))
+    l_ids = <np.int64_t *> malloc(nless * sizeof(np.int64_t))
     for i in range(nless):
-        less_gles[i] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
-        less_gres[i] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+        less_gles[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        less_gres[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
 
-    greater_gles = <np.float64_t **> alloca(ngreater * sizeof(np.float64_t*))
-    greater_gres = <np.float64_t **> alloca(ngreater * sizeof(np.float64_t*))
-    g_ids = <np.int64_t *> alloca(ngreater * sizeof(np.int64_t))
+    greater_gles = <np.float64_t **> malloc(ngreater * sizeof(np.float64_t*))
+    greater_gres = <np.float64_t **> malloc(ngreater * sizeof(np.float64_t*))
+    g_ids = <np.int64_t *> malloc(ngreater * sizeof(np.int64_t))
     for i in range(ngreater):
-        greater_gles[i] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
-        greater_gres[i] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+        greater_gles[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        greater_gres[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
 
     cdef int index
     for i in range(nless):
@@ -354,13 +354,29 @@
     if ngreater > 0:
         add_grids(node.right, ngreater, greater_gles, greater_gres,
                   g_ids, rank, size)
+
+    for i in range(nless):
+        free(less_gles[i])
+        free(less_gres[i])
+    free(l_ids)
+    free(less_ids)
+    free(less_gles)
+    free(less_gres)
+    for i in range(ngreater):
+        free(greater_gles[i])
+        free(greater_gres[i])
+    free(g_ids)
+    free(greater_ids)
+    free(greater_gles)
+    free(greater_gres)
+
     return
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
 cdef int should_i_split(Node node, int rank, int size):
-    if node.node_id < size:
+    if node.node_id < size and node.node_id > 0:
         return 1
     return 0
 
@@ -568,8 +584,8 @@
     # Create a Split
     divide(node, split)
 
-    less_index = <np.int64_t *> alloca(ngrids * sizeof(np.int64_t))
-    greater_index = <np.int64_t *> alloca(ngrids * sizeof(np.int64_t))
+    less_index = <np.int64_t *> malloc(ngrids * sizeof(np.int64_t))
+    greater_index = <np.int64_t *> malloc(ngrids * sizeof(np.int64_t))
    
     nless = 0
     ngreater = 0
@@ -582,19 +598,19 @@
             greater_index[ngreater] = i
             ngreater += 1
 
-    less_gles = <np.float64_t **> alloca(nless * sizeof(np.float64_t*))
-    less_gres = <np.float64_t **> alloca(nless * sizeof(np.float64_t*))
-    l_ids = <np.int64_t *> alloca(nless * sizeof(np.int64_t))
+    less_gles = <np.float64_t **> malloc(nless * sizeof(np.float64_t*))
+    less_gres = <np.float64_t **> malloc(nless * sizeof(np.float64_t*))
+    l_ids = <np.int64_t *> malloc(nless * sizeof(np.int64_t))
     for i in range(nless):
-        less_gles[i] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
-        less_gres[i] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+        less_gles[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        less_gres[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
 
-    greater_gles = <np.float64_t **> alloca(ngreater * sizeof(np.float64_t*))
-    greater_gres = <np.float64_t **> alloca(ngreater * sizeof(np.float64_t*))
-    g_ids = <np.int64_t *> alloca(ngreater * sizeof(np.int64_t))
+    greater_gles = <np.float64_t **> malloc(ngreater * sizeof(np.float64_t*))
+    greater_gres = <np.float64_t **> malloc(ngreater * sizeof(np.float64_t*))
+    g_ids = <np.int64_t *> malloc(ngreater * sizeof(np.int64_t))
     for i in range(ngreater):
-        greater_gles[i] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
-        greater_gres[i] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+        greater_gles[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        greater_gres[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
 
     cdef int index
     for i in range(nless):
@@ -623,6 +639,22 @@
         insert_grids(node.right, ngreater, greater_gles, greater_gres,
                      g_ids, rank, size)
 
+    for i in range(nless):
+        free(less_gles[i])
+        free(less_gres[i])
+    free(l_ids)
+    free(less_index)
+    free(less_gles)
+    free(less_gres)
+    for i in range(ngreater):
+        free(greater_gles[i])
+        free(greater_gres[i])
+    free(g_ids)
+    free(greater_index)
+    free(greater_gles)
+    free(greater_gres)
+
+
     return 0
 
 cdef geo_split(Node node, 


https://bitbucket.org/yt_analysis/yt/commits/55ba8471e0c3/
Changeset:   55ba8471e0c3
Branch:      yt
User:        samskillman
Date:        2013-06-13 20:19:26
Summary:     Merging and resolving simple conflict
Affected #:  19 files

diff -r 1ae43f8353213e5e3120293feab9ded8e084c594 -r 55ba8471e0c33bab5934ebe4b567680d878fb471 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -83,3 +83,26 @@
 """
 
 __version__ = "2.5-dev"
+
+def run_nose(verbose=False, run_answer_tests=False, answer_big_data=False):
+    import nose, os, sys
+    from yt.config import ytcfg
+    nose_argv = sys.argv
+    nose_argv += ['--exclude=answer_testing','--detailed-errors']
+    if verbose:
+        nose_argv.append('-v')
+    if run_answer_tests:
+        nose_argv.append('--with-answer-testing')
+    if answer_big_data:
+        nose_argv.append('--answer-big-data')
+    log_suppress = ytcfg.getboolean("yt","suppressStreamLogging")
+    ytcfg["yt","suppressStreamLogging"] = 'True'
+    initial_dir = os.getcwd()
+    yt_file = os.path.abspath(__file__)
+    yt_dir = os.path.dirname(yt_file)
+    os.chdir(yt_dir)
+    try:
+        nose.run(argv=nose_argv)
+    finally:
+        os.chdir(initial_dir)
+        ytcfg["yt","suppressStreamLogging"] = log_suppress

diff -r 1ae43f8353213e5e3120293feab9ded8e084c594 -r 55ba8471e0c33bab5934ebe4b567680d878fb471 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -143,10 +143,10 @@
             return self.CoM
         pm = self["ParticleMassMsun"]
         c = {}
-        c[0] = self["particle_position_x"]
-        c[1] = self["particle_position_y"]
-        c[2] = self["particle_position_z"]
-        c_vec = np.zeros(3)
+        # We shift into a box where the origin is the left edge
+        c[0] = self["particle_position_x"] - self.pf.domain_left_edge[0]
+        c[1] = self["particle_position_y"] - self.pf.domain_left_edge[1]
+        c[2] = self["particle_position_z"] - self.pf.domain_left_edge[2]
         com = []
         for i in range(3):
             # A halo is likely periodic around a boundary if the distance 
@@ -159,13 +159,12 @@
                 com.append(c[i])
                 continue
             # Now we want to flip around only those close to the left boundary.
-            d_left = c[i] - self.pf.domain_left_edge[i]
-            sel = (d_left <= (self.pf.domain_width[i]/2))
+            sel = (c[i] <= (self.pf.domain_width[i]/2))
             c[i][sel] += self.pf.domain_width[i]
             com.append(c[i])
         com = np.array(com)
         c = (com * pm).sum(axis=1) / pm.sum()
-        return c%self.pf.domain_width
+        return c%self.pf.domain_width + self.pf.domain_left_edge
 
     def maximum_density(self):
         r"""Return the HOP-identified maximum density. Not applicable to

diff -r 1ae43f8353213e5e3120293feab9ded8e084c594 -r 55ba8471e0c33bab5934ebe4b567680d878fb471 yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -212,7 +212,7 @@
             dis[self.num_sigma_bins-i-3] += dis[self.num_sigma_bins-i-2]
             if i == (self.num_sigma_bins - 3): break
 
-        self.dis = dis  / self.pf['CosmologyComovingBoxSize']**3.0 * self.hubble0**3.0
+        self.dis = dis  / (self.pf.domain_width * self.pf.units["mpccm"]).prod()
 
     def sigmaM(self):
         """

diff -r 1ae43f8353213e5e3120293feab9ded8e084c594 -r 55ba8471e0c33bab5934ebe4b567680d878fb471 yt/data_objects/setup.py
--- a/yt/data_objects/setup.py
+++ b/yt/data_objects/setup.py
@@ -9,5 +9,6 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('data_objects', parent_package, top_path)
     config.make_config_py()  # installs __config__.py
+    config.add_subpackage("tests")
     #config.make_svn_version_py()
     return config

diff -r 1ae43f8353213e5e3120293feab9ded8e084c594 -r 55ba8471e0c33bab5934ebe4b567680d878fb471 yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -163,10 +163,12 @@
                         "angmomen_y",
                         "angmomen_z",
                         "mlast",
+                        "r",
                         "mdeut",
                         "n",
                         "mdot",
                         "burnstate",
+                        "luminosity",
                         "id"]
 
 for pf in _particle_field_list:

diff -r 1ae43f8353213e5e3120293feab9ded8e084c594 -r 55ba8471e0c33bab5934ebe4b567680d878fb471 yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -77,6 +77,15 @@
         parses the Orion Star Particle text files
              
         """
+
+        fn = grid.pf.fullplotdir[:-4] + "sink"
+
+        # Figure out the format of the particle file
+        with open(fn, 'r') as f:
+            lines = f.readlines()
+        line = lines[1]
+
+        # The basic fields that all sink particles have
         index = {'particle_mass': 0,
                  'particle_position_x': 1,
                  'particle_position_y': 2,
@@ -87,15 +96,38 @@
                  'particle_angmomen_x': 7,
                  'particle_angmomen_y': 8,
                  'particle_angmomen_z': 9,
-                 'particle_mlast': 10,
-                 'particle_mdeut': 11,
-                 'particle_n': 12,
-                 'particle_mdot': 13,
-                 'particle_burnstate': 14,
-                 'particle_id': 15}
+                 'particle_id': -1}
 
+        if len(line.strip().split()) == 11:
+            # these are vanilla sinks, do nothing
+            pass  
+
+        elif len(line.strip().split()) == 17:
+            # these are old-style stars, add stellar model parameters
+            index['particle_mlast']     = 10
+            index['particle_r']         = 11
+            index['particle_mdeut']     = 12
+            index['particle_n']         = 13
+            index['particle_mdot']      = 14,
+            index['particle_burnstate'] = 15
+
+        elif len(line.strip().split()) == 18:
+            # these are the newer style, add luminosity as well
+            index['particle_mlast']     = 10
+            index['particle_r']         = 11
+            index['particle_mdeut']     = 12
+            index['particle_n']         = 13
+            index['particle_mdot']      = 14,
+            index['particle_burnstate'] = 15,
+            index['particle_luminosity']= 16
+
+        else:
+            # give a warning if none of the above apply:
+            mylog.warning('Warning - could not figure out particle output file')
+            mylog.warning('These results could be nonsense!')
+            
         def read(line, field):
-            return float(line.split(' ')[index[field]])
+            return float(line.strip().split(' ')[index[field]])
 
         fn = grid.pf.fullplotdir[:-4] + "sink"
         with open(fn, 'r') as f:

diff -r 1ae43f8353213e5e3120293feab9ded8e084c594 -r 55ba8471e0c33bab5934ebe4b567680d878fb471 yt/frontends/orion/fields.py
--- a/yt/frontends/orion/fields.py
+++ b/yt/frontends/orion/fields.py
@@ -163,10 +163,12 @@
                         "angmomen_y",
                         "angmomen_z",
                         "mlast",
+                        "r",
                         "mdeut",
                         "n",
                         "mdot",
                         "burnstate",
+                        "luminosity",
                         "id"]
 
 for pf in _particle_field_list:

diff -r 1ae43f8353213e5e3120293feab9ded8e084c594 -r 55ba8471e0c33bab5934ebe4b567680d878fb471 yt/frontends/orion/io.py
--- a/yt/frontends/orion/io.py
+++ b/yt/frontends/orion/io.py
@@ -44,6 +44,17 @@
         parses the Orion Star Particle text files
         
         """
+
+        fn = grid.pf.fullplotdir + "/StarParticles"
+        if not os.path.exists(fn):
+            fn = grid.pf.fullplotdir + "/SinkParticles"
+
+        # Figure out the format of the particle file
+        with open(fn, 'r') as f:
+            lines = f.readlines()
+        line = lines[1]
+        
+        # The basic fields that all sink particles have
         index = {'particle_mass': 0,
                  'particle_position_x': 1,
                  'particle_position_y': 2,
@@ -54,17 +65,39 @@
                  'particle_angmomen_x': 7,
                  'particle_angmomen_y': 8,
                  'particle_angmomen_z': 9,
-                 'particle_mlast': 10,
-                 'particle_mdeut': 11,
-                 'particle_n': 12,
-                 'particle_mdot': 13,
-                 'particle_burnstate': 14,
-                 'particle_id': 15}
+                 'particle_id': -1}
+
+        if len(line.strip().split()) == 11:
+            # these are vanilla sinks, do nothing
+            pass  
+
+        elif len(line.strip().split()) == 17:
+            # these are old-style stars, add stellar model parameters
+            index['particle_mlast']     = 10
+            index['particle_r']         = 11
+            index['particle_mdeut']     = 12
+            index['particle_n']         = 13
+            index['particle_mdot']      = 14,
+            index['particle_burnstate'] = 15
+
+        elif len(line.strip().split()) == 18:
+            # these are the newer style, add luminosity as well
+            index['particle_mlast']     = 10
+            index['particle_r']         = 11
+            index['particle_mdeut']     = 12
+            index['particle_n']         = 13
+            index['particle_mdot']      = 14,
+            index['particle_burnstate'] = 15,
+            index['particle_luminosity']= 16
+
+        else:
+            # give a warning if none of the above apply:
+            mylog.warning('Warning - could not figure out particle output file')
+            mylog.warning('These results could be nonsense!')
 
         def read(line, field):
-            return float(line.split(' ')[index[field]])
+            return float(line.strip().split(' ')[index[field]])
 
-        fn = grid.pf.fullplotdir + "/StarParticles"
         with open(fn, 'r') as f:
             lines = f.readlines()
             particles = []

diff -r 1ae43f8353213e5e3120293feab9ded8e084c594 -r 55ba8471e0c33bab5934ebe4b567680d878fb471 yt/frontends/setup.py
--- a/yt/frontends/setup.py
+++ b/yt/frontends/setup.py
@@ -21,4 +21,9 @@
     config.add_subpackage("castro")
     config.add_subpackage("stream")
     config.add_subpackage("pluto")
+    config.add_subpackage("flash/tests")
+    config.add_subpackage("enzo/tests")
+    config.add_subpackage("orion/tests")
+    config.add_subpackage("stream/tests")
+    config.add_subpackage("chombo/tests")
     return config

diff -r 1ae43f8353213e5e3120293feab9ded8e084c594 -r 55ba8471e0c33bab5934ebe4b567680d878fb471 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -369,6 +369,20 @@
     if ytcfg.getint("yt", cfg_option) > 0: return
     return func(*args, **kwargs)
 
+def is_root():
+    """
+    This function returns True if it is on the root processor of the
+    topcomm and False otherwise.
+    """
+    from yt.config import ytcfg
+    cfg_option = "__topcomm_parallel_rank"
+    if not ytcfg.getboolean("yt","__parallel"):
+        return True
+    if ytcfg.getint("yt", cfg_option) > 0: 
+        return False
+    return True
+
+
 #
 # Our signal and traceback handling functions
 #

diff -r 1ae43f8353213e5e3120293feab9ded8e084c594 -r 55ba8471e0c33bab5934ebe4b567680d878fb471 yt/utilities/grid_data_format/setup.py
--- a/yt/utilities/grid_data_format/setup.py
+++ b/yt/utilities/grid_data_format/setup.py
@@ -9,6 +9,7 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('grid_data_format', parent_package, top_path)
     config.add_subpackage("conversion")
+    config.add_subpackage("tests")
     config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config

diff -r 1ae43f8353213e5e3120293feab9ded8e084c594 -r 55ba8471e0c33bab5934ebe4b567680d878fb471 yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -252,6 +252,7 @@
     config.add_extension("amr_kdtools", 
                          ["yt/utilities/lib/amr_kdtools.pyx"],
                          libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
+    config.add_subpackage("tests")
 
     if os.environ.get("GPERFTOOLS", "no").upper() != "NO":
         gpd = os.environ["GPERFTOOLS"]

diff -r 1ae43f8353213e5e3120293feab9ded8e084c594 -r 55ba8471e0c33bab5934ebe4b567680d878fb471 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -257,7 +257,7 @@
             try:
                 rv = func(*args, **kwargs)
                 all_clear = 1
-            except:
+            except Exception as ex:
                 traceback.print_last()
                 all_clear = 0
         else:

diff -r 1ae43f8353213e5e3120293feab9ded8e084c594 -r 55ba8471e0c33bab5934ebe4b567680d878fb471 yt/utilities/parallel_tools/task_queue.py
--- a/yt/utilities/parallel_tools/task_queue.py
+++ b/yt/utilities/parallel_tools/task_queue.py
@@ -133,14 +133,14 @@
     comm = _get_comm(())
     if not parallel_capable:
         mylog.error("Cannot create task queue for serial process.")
-        raise RunTimeError
+        raise RuntimeError
     my_size = comm.comm.size
     if njobs <= 0:
         njobs = my_size - 1
     if njobs >= my_size:
         mylog.error("You have asked for %s jobs, but only %s processors are available.",
                     njobs, (my_size - 1))
-        raise RunTimeError
+        raise RuntimeError
     my_rank = comm.rank
     all_new_comms = np.array_split(np.arange(1, my_size), njobs)
     all_new_comms.insert(0, np.array([0]))
@@ -161,14 +161,14 @@
     comm = _get_comm(())
     if not parallel_capable:
         mylog.error("Cannot create task queue for serial process.")
-        raise RunTimeError
+        raise RuntimeError
     my_size = comm.comm.size
     if njobs <= 0:
         njobs = my_size - 1
     if njobs >= my_size:
         mylog.error("You have asked for %s jobs, but only %s processors are available.",
                     njobs, (my_size - 1))
-        raise RunTimeError
+        raise RuntimeError
     my_rank = comm.rank
     all_new_comms = np.array_split(np.arange(1, my_size), njobs)
     all_new_comms.insert(0, np.array([0]))

diff -r 1ae43f8353213e5e3120293feab9ded8e084c594 -r 55ba8471e0c33bab5934ebe4b567680d878fb471 yt/utilities/rpdb.py
--- a/yt/utilities/rpdb.py
+++ b/yt/utilities/rpdb.py
@@ -55,7 +55,7 @@
     traceback.print_exception(exc_type, exc, tb)
     task = ytcfg.getint("yt", "__global_parallel_rank")
     size = ytcfg.getint("yt", "__global_parallel_size")
-    print "Starting RPDB server on task %s ; connect with 'yt rpdb %s'" \
+    print "Starting RPDB server on task %s ; connect with 'yt rpdb -t %s'" \
             % (task,task)
     handler = pdb_handler(tb)
     server = PdbXMLRPCServer(("localhost", 8010+task))

diff -r 1ae43f8353213e5e3120293feab9ded8e084c594 -r 55ba8471e0c33bab5934ebe4b567680d878fb471 yt/utilities/setup.py
--- a/yt/utilities/setup.py
+++ b/yt/utilities/setup.py
@@ -56,6 +56,7 @@
     config.add_subpackage("lib")
     config.add_extension("data_point_utilities",
                 "yt/utilities/data_point_utilities.c", libraries=["m"])
+    config.add_subpackage("tests")
     hdf5_inc, hdf5_lib = check_for_hdf5()
     include_dirs = [hdf5_inc]
     library_dirs = [hdf5_lib]

diff -r 1ae43f8353213e5e3120293feab9ded8e084c594 -r 55ba8471e0c33bab5934ebe4b567680d878fb471 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -717,7 +717,7 @@
             cbname = callback_registry[key]._type_name
             CallbackMaker = callback_registry[key]
             callback = invalidate_plot(apply_callback(CallbackMaker))
-            callback.__doc__ = CallbackMaker.__init__.__doc__
+            callback.__doc__ = CallbackMaker.__doc__
             self.__dict__['annotate_'+cbname] = types.MethodType(callback,self)
 
     @invalidate_plot

diff -r 1ae43f8353213e5e3120293feab9ded8e084c594 -r 55ba8471e0c33bab5934ebe4b567680d878fb471 yt/visualization/setup.py
--- a/yt/visualization/setup.py
+++ b/yt/visualization/setup.py
@@ -7,6 +7,7 @@
     config = Configuration('visualization', parent_package, top_path)
     config.add_subpackage("image_panner")
     config.add_subpackage("volume_rendering")
+    config.add_subpackage("tests")
     config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     config.add_extension("_MPL", "_MPL.c", libraries=["m"])

diff -r 1ae43f8353213e5e3120293feab9ded8e084c594 -r 55ba8471e0c33bab5934ebe4b567680d878fb471 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -1110,11 +1110,13 @@
     def __init__(self, center, radius, nside,
                  transfer_function = None, fields = None,
                  sub_samples = 5, log_fields = None, volume = None,
-                 pf = None, use_kd=True, no_ghost=False, use_light=False):
+                 pf = None, use_kd=True, no_ghost=False, use_light=False,
+                 inner_radius = 10):
         ParallelAnalysisInterface.__init__(self)
         if pf is not None: self.pf = pf
         self.center = np.array(center, dtype='float64')
         self.radius = radius
+        self.inner_radius = inner_radius
         self.nside = nside
         self.use_kd = use_kd
         if transfer_function is None:
@@ -1122,9 +1124,11 @@
         self.transfer_function = transfer_function
 
         if isinstance(self.transfer_function, ProjectionTransferFunction):
-            self._sampler_object = ProjectionSampler
+            self._sampler_object = InterpolatedProjectionSampler
+            self._needs_tf = 0
         else:
             self._sampler_object = VolumeRenderSampler
+            self._needs_tf = 1
 
         if fields is None: fields = ["Density"]
         self.fields = fields
@@ -1148,15 +1152,20 @@
     def get_sampler_args(self, image):
         nv = 12 * self.nside ** 2
         vs = arr_pix2vec_nest(self.nside, np.arange(nv))
-        vs *= self.radius
-        vs.shape = nv, 1, 3
+        vs.shape = (nv, 1, 3)
+        vs += 1e-8
         uv = np.ones(3, dtype='float64')
         positions = np.ones((nv, 1, 3), dtype='float64') * self.center
+        dx = min(g.dds.min() for g in self.pf.h.find_point(self.center)[0])
+        positions += self.inner_radius * dx * vs
+        vs *= self.radius
         args = (positions, vs, self.center,
                 (0.0, 1.0, 0.0, 1.0),
                 image, uv, uv,
-                np.zeros(3, dtype='float64'),
-                self.transfer_function, self.sub_samples)
+                np.zeros(3, dtype='float64'))
+        if self._needs_tf:
+            args += (self.transfer_function,)
+        args += (self.sub_samples,)
         return args
 
     def _render(self, double_check, num_threads, image, sampler):
@@ -1231,28 +1240,14 @@
     def save_image(self, image, fn=None, clim=None, label = None):
         if self.comm.rank == 0 and fn is not None:
             # This assumes Density; this is a relatively safe assumption.
-            import matplotlib.figure
-            import matplotlib.backends.backend_agg
-            phi, theta = np.mgrid[0.0:2*np.pi:800j, 0:np.pi:800j]
-            pixi = arr_ang2pix_nest(self.nside, theta.ravel(), phi.ravel())
-            image *= self.radius * self.pf['cm']
-            img = np.log10(image[:,0,0][pixi]).reshape((800,800))
-
-            fig = matplotlib.figure.Figure((10, 5))
-            ax = fig.add_subplot(1,1,1,projection='hammer')
-            implot = ax.imshow(img, extent=(-np.pi,np.pi,-np.pi/2,np.pi/2), clip_on=False, aspect=0.5)
-            cb = fig.colorbar(implot, orientation='horizontal')
-
-            if label == None:
-                cb.set_label("Projected %s" % self.fields[0])
+            if label is None:
+                label = "Projected %s" % (self.fields[0])
+            if clim is not None:
+                cmin, cmax = clim
             else:
-                cb.set_label(label)
-            if clim is not None: cb.set_clim(*clim)
-            ax.xaxis.set_ticks(())
-            ax.yaxis.set_ticks(())
-            canvas = matplotlib.backends.backend_agg.FigureCanvasAgg(fig)
-            canvas.print_figure(fn)
-
+                cmin = cmax = None
+            plot_allsky_healpix(image[:,0,0], self.nside, fn, label, 
+                                cmin = cmin, cmax = cmax)
 
 class AdaptiveHEALpixCamera(Camera):
     def __init__(self, center, radius, nside,
@@ -2022,7 +2017,7 @@
     nv = 12*nside**2
     image = np.zeros((nv,1,4), dtype='float64', order='C')
     vs = arr_pix2vec_nest(nside, np.arange(nv))
-    vs.shape = (nv,1,3)
+    vs.shape = (nv, 1, 3)
     if rotation is not None:
         vs2 = vs.copy()
         for i in range(3):


https://bitbucket.org/yt_analysis/yt/commits/a5f16d327d7e/
Changeset:   a5f16d327d7e
Branch:      yt
User:        MatthewTurk
Date:        2013-07-01 16:01:22
Summary:     Merged in samskillman/yt (pull request #525)

Cythonize the AMRKDTree build
Affected #:  9 files

diff -r 62e723e2f60c980f48fca5f76f5fdd8862945b98 -r a5f16d327d7e784257019a85bad89437f85ef74d yt/utilities/amr_kdtree/amr_kdtools.py
--- a/yt/utilities/amr_kdtree/amr_kdtools.py
+++ b/yt/utilities/amr_kdtree/amr_kdtools.py
@@ -1,5 +1,5 @@
 """
-AMR kD-Tree Tools 
+AMR kD-Tree Tools
 
 Authors: Samuel Skillman <samskillman at gmail.com>
 Affiliation: University of Colorado at Boulder
@@ -25,435 +25,10 @@
 """
 import numpy as np
 from yt.funcs import *
-from yt.utilities.lib import kdtree_get_choices
-
-def _lchild_id(node_id): return (node_id<<1)
-def _rchild_id(node_id): return (node_id<<1) + 1
-def _parent_id(node_id): return (node_id-1) >> 1
-
-class Node(object):
-    def __init__(self, parent, left, right,
-            left_edge, right_edge, grid_id, node_id):
-        self.left = left
-        self.right = right
-        self.left_edge = left_edge
-        self.right_edge = right_edge
-        self.grid = grid_id
-        self.parent = parent
-        self.id = node_id
-        self.data = None
-        self.split = None
-
-class Split(object):
-    def __init__(self, dim, pos):
-        self.dim = dim
-        self.pos = pos
-
-def should_i_build(node, rank, size):
-    if (node.id < size) or (node.id >= 2*size):
-        return True
-    elif node.id - size == rank:
-        return True
-    else:
-        return False
-
-
-def add_grid(node, gle, gre, gid, rank, size):
-    if not should_i_build(node, rank, size):
-        return
-
-    if kd_is_leaf(node):
-        insert_grid(node, gle, gre, gid, rank, size)
-    else:
-        less_id = gle[node.split.dim] < node.split.pos
-        if less_id:
-            add_grid(node.left, gle, gre,
-                     gid, rank, size)
-
-        greater_id = gre[node.split.dim] > node.split.pos
-        if greater_id:
-            add_grid(node.right, gle, gre,
-                     gid, rank, size)
-
-
-def insert_grid(node, gle, gre, grid_id, rank, size):
-    if not should_i_build(node, rank, size):
-        return
-
-    # If we should continue to split based on parallelism, do so!
-    if should_i_split(node, rank, size):
-        geo_split(node, gle, gre, grid_id, rank, size)
-        return
-
-    if np.all(gle <= node.left_edge) and \
-            np.all(gre >= node.right_edge):
-        node.grid = grid_id
-        assert(node.grid is not None)
-        return
-
-    # Split the grid
-    check = split_grid(node, gle, gre, grid_id, rank, size)
-    # If check is -1, then we have found a place where there are no choices.
-    # Exit out and set the node to None.
-    if check == -1:
-        node.grid = None
-    return
-
-
-def add_grids(node, gles, gres, gids, rank, size):
-    if not should_i_build(node, rank, size):
-        return
-
-    if kd_is_leaf(node):
-        insert_grids(node, gles, gres, gids, rank, size)
-    else:
-        less_ids = gles[:,node.split.dim] < node.split.pos
-        if len(less_ids) > 0:
-            add_grids(node.left, gles[less_ids], gres[less_ids],
-                      gids[less_ids], rank, size)
-
-        greater_ids = gres[:,node.split.dim] > node.split.pos
-        if len(greater_ids) > 0:
-            add_grids(node.right, gles[greater_ids], gres[greater_ids],
-                      gids[greater_ids], rank, size)
-
-
-def should_i_split(node, rank, size):
-    return node.id < size
-
-
-def geo_split_grid(node, gle, gre, grid_id, rank, size):
-    big_dim = np.argmax(gre-gle)
-    new_pos = (gre[big_dim] + gle[big_dim])/2.
-    old_gre = gre.copy()
-    new_gle = gle.copy()
-    new_gle[big_dim] = new_pos
-    gre[big_dim] = new_pos
-
-    split = Split(big_dim, new_pos)
-
-    # Create a Split
-    divide(node, split)
-
-    # Populate Left Node
-    #print 'Inserting left node', node.left_edge, node.right_edge
-    insert_grid(node.left, gle, gre,
-                grid_id, rank, size)
-
-    # Populate Right Node
-    #print 'Inserting right node', node.left_edge, node.right_edge
-    insert_grid(node.right, new_gle, old_gre,
-                grid_id, rank, size)
-    return
-
-
-def geo_split(node, gles, gres, grid_ids, rank, size):
-    big_dim = np.argmax(gres[0]-gles[0])
-    new_pos = (gres[0][big_dim] + gles[0][big_dim])/2.
-    old_gre = gres[0].copy()
-    new_gle = gles[0].copy()
-    new_gle[big_dim] = new_pos
-    gres[0][big_dim] = new_pos
-    gles = np.append(gles, np.array([new_gle]), axis=0)
-    gres = np.append(gres, np.array([old_gre]), axis=0)
-    grid_ids = np.append(grid_ids, grid_ids, axis=0)
-
-    split = Split(big_dim, new_pos)
-
-    # Create a Split
-    divide(node, split)
-
-    # Populate Left Node
-    #print 'Inserting left node', node.left_edge, node.right_edge
-    insert_grids(node.left, gles[:1], gres[:1],
-            grid_ids[:1], rank, size)
-
-    # Populate Right Node
-    #print 'Inserting right node', node.left_edge, node.right_edge
-    insert_grids(node.right, gles[1:], gres[1:],
-            grid_ids[1:], rank, size)
-    return
-
-def insert_grids(node, gles, gres, grid_ids, rank, size):
-    if not should_i_build(node, rank, size) or grid_ids.size == 0:
-        return
-
-    if len(grid_ids) == 1:
-        # If we should continue to split based on parallelism, do so!
-        if should_i_split(node, rank, size):
-            geo_split(node, gles, gres, grid_ids, rank, size)
-            return
-
-        if np.all(gles[0] <= node.left_edge) and \
-                np.all(gres[0] >= node.right_edge):
-            node.grid = grid_ids[0]
-            assert(node.grid is not None)
-            return
-
-    # Split the grids
-    check = split_grids(node, gles, gres, grid_ids, rank, size)
-    # If check is -1, then we have found a place where there are no choices.
-    # Exit out and set the node to None.
-    if check == -1:
-        node.grid = None
-    return
-
-def split_grid(node, gle, gre, grid_id, rank, size):
-    # Find a Split
-    data = np.array([(gle[:], gre[:])],  copy=False)
-    best_dim, split_pos, less_id, greater_id = \
-        kdtree_get_choices(data, node.left_edge, node.right_edge)
-
-    # If best_dim is -1, then we have found a place where there are no choices.
-    # Exit out and set the node to None.
-    if best_dim == -1:
-        return -1
-
-    split = Split(best_dim, split_pos)
-
-    del data, best_dim, split_pos
-
-    # Create a Split
-    divide(node, split)
-
-    # Populate Left Node
-    #print 'Inserting left node', node.left_edge, node.right_edge
-    if less_id:
-        insert_grid(node.left, gle, gre,
-                     grid_id, rank, size)
-
-    # Populate Right Node
-    #print 'Inserting right node', node.left_edge, node.right_edge
-    if greater_id:
-        insert_grid(node.right, gle, gre,
-                     grid_id, rank, size)
-
-    return
-
-
-def split_grids(node, gles, gres, grid_ids, rank, size):
-    # Find a Split
-    data = np.array([(gles[i,:], gres[i,:]) for i in
-        xrange(grid_ids.shape[0])], copy=False)
-    best_dim, split_pos, less_ids, greater_ids = \
-        kdtree_get_choices(data, node.left_edge, node.right_edge)
-
-    # If best_dim is -1, then we have found a place where there are no choices.
-    # Exit out and set the node to None.
-    if best_dim == -1:
-        return -1
-
-    split = Split(best_dim, split_pos)
-
-    del data, best_dim, split_pos
-
-    # Create a Split
-    divide(node, split)
-
-    # Populate Left Node
-    #print 'Inserting left node', node.left_edge, node.right_edge
-    insert_grids(node.left, gles[less_ids], gres[less_ids],
-                 grid_ids[less_ids], rank, size)
-
-    # Populate Right Node
-    #print 'Inserting right node', node.left_edge, node.right_edge
-    insert_grids(node.right, gles[greater_ids], gres[greater_ids],
-                 grid_ids[greater_ids], rank, size)
-
-    return
-
-def new_right(Node, split):
-    new_right = Node.right_edge.copy()
-    new_right[split.dim] = split.pos
-    return new_right
-
-def new_left(Node, split):
-    new_left = Node.left_edge.copy()
-    new_left[split.dim] = split.pos
-    return new_left
-
-def divide(node, split):
-    # Create a Split
-    node.split = split
-    node.left = Node(node, None, None,
-            node.left_edge, new_right(node, split), node.grid,
-                     _lchild_id(node.id))
-    node.right = Node(node, None, None,
-            new_left(node, split), node.right_edge, node.grid,
-                      _rchild_id(node.id))
-    return
-
-def kd_sum_volume(node):
-    if (node.left is None) and (node.right is None):
-        if node.grid is None:
-            return 0.0
-        return np.prod(node.right_edge - node.left_edge)
-    else:
-        return kd_sum_volume(node.left) + kd_sum_volume(node.right)
-
-def kd_sum_cells(node):
-    if (node.left is None) and (node.right is None):
-        if node.grid is None:
-            return 0.0
-        return np.prod(node.right_edge - node.left_edge)
-    else:
-        return kd_sum_volume(node.left) + kd_sum_volume(node.right)
-
-
-def kd_node_check(node):
-    assert (node.left is None) == (node.right is None)
-    if (node.left is None) and (node.right is None):
-        if node.grid is not None:
-            return np.prod(node.right_edge - node.left_edge)
-        else: return 0.0
-    else:
-        return kd_node_check(node.left)+kd_node_check(node.right)
-
-def kd_is_leaf(node):
-    has_l_child = node.left is None
-    has_r_child = node.right is None
-    assert has_l_child == has_r_child
-    return has_l_child
-
-def step_depth(current, previous):
-    '''
-    Takes a single step in the depth-first traversal
-    '''
-    if kd_is_leaf(current): # At a leaf, move back up
-        previous = current
-        current = current.parent
-
-    elif current.parent is previous: # Moving down, go left first
-        previous = current
-        if current.left is not None:
-            current = current.left
-        elif current.right is not None:
-            current = current.right
-        else:
-            current = current.parent
-
-    elif current.left is previous: # Moving up from left, go right 
-        previous = current
-        if current.right is not None:
-            current = current.right
-        else:
-            current = current.parent
-
-    elif current.right is previous: # Moving up from right child, move up
-        previous = current
-        current = current.parent
-
-    return current, previous
-
-def depth_traverse(tree, max_node=None):
-    '''
-    Yields a depth-first traversal of the kd tree always going to
-    the left child before the right.
-    '''
-    current = tree.trunk
-    previous = None
-    if max_node is None:
-        max_node = np.inf
-    while current is not None:
-        yield current
-        current, previous = step_depth(current, previous)
-        if current is None: break
-        if current.id >= max_node:
-            current = current.parent
-            previous = current.right
-
-def depth_first_touch(tree, max_node=None):
-    '''
-    Yields a depth-first traversal of the kd tree always going to
-    the left child before the right.
-    '''
-    current = tree.trunk
-    previous = None
-    if max_node is None:
-        max_node = np.inf
-    while current is not None:
-        if previous is None or previous.parent != current:
-            yield current
-        current, previous = step_depth(current, previous)
-        if current is None: break
-        if current.id >= max_node:
-            current = current.parent
-            previous = current.right
-
-def breadth_traverse(tree):
-    '''
-    Yields a breadth-first traversal of the kd tree always going to
-    the left child before the right.
-    '''
-    current = tree.trunk
-    previous = None
-    while current is not None:
-        yield current
-        current, previous = step_depth(current, previous)
-
-
-def viewpoint_traverse(tree, viewpoint):
-    '''
-    Yields a viewpoint dependent traversal of the kd-tree.  Starts
-    with nodes furthest away from viewpoint.
-    '''
-
-    current = tree.trunk
-    previous = None
-    while current is not None:
-        yield current
-        current, previous = step_viewpoint(current, previous, viewpoint)
-
-def step_viewpoint(current, previous, viewpoint):
-    '''
-    Takes a single step in the viewpoint based traversal.  Always
-    goes to the node furthest away from viewpoint first.
-    '''
-    if kd_is_leaf(current): # At a leaf, move back up
-        previous = current
-        current = current.parent
-    elif current.split.dim is None: # This is a dead node
-        previous = current
-        current = current.parent
-
-    elif current.parent is previous: # Moving down
-        previous = current
-        if viewpoint[current.split.dim] <= current.split.pos:
-            if current.right is not None:
-                current = current.right
-            else:
-                previous = current.right
-        else:
-            if current.left is not None:
-                current = current.left
-            else:
-                previous = current.left
-
-    elif current.right is previous: # Moving up from right 
-        previous = current
-        if viewpoint[current.split.dim] <= current.split.pos:
-            if current.left is not None:
-                current = current.left
-            else:
-                current = current.parent
-        else:
-            current = current.parent
-
-    elif current.left is previous: # Moving up from left child
-        previous = current
-        if viewpoint[current.split.dim] > current.split.pos:
-            if current.right is not None:
-                current = current.right
-            else:
-                current = current.parent
-        else:
-            current = current.parent
-
-    return current, previous
 
 
 def receive_and_reduce(comm, incoming_rank, image, add_to_front):
-    mylog.debug( 'Receiving image from %04i' % incoming_rank)
+    mylog.debug('Receiving image from %04i' % incoming_rank)
     #mylog.debug( '%04i receiving image from %04i'%(self.comm.rank,back.owner))
     arr2 = comm.recv_array(incoming_rank, incoming_rank).reshape(
         (image.shape[0], image.shape[1], image.shape[2]))
@@ -470,36 +45,24 @@
         np.add(image, front, image)
         return image
 
-    ta = 1.0 - front[:,:,3]
+    ta = 1.0 - front[:, :, 3]
     np.maximum(ta, 0.0, ta)
     # This now does the following calculation, but in a memory
     # conservative fashion
     # image[:,:,i  ] = front[:,:,i] + ta*back[:,:,i]
     image = back.copy()
     for i in range(4):
-        np.multiply(image[:,:,i], ta, image[:,:,i])
+        np.multiply(image[:, :, i], ta, image[:, :, i])
     np.add(image, front, image)
     return image
 
+
 def send_to_parent(comm, outgoing_rank, image):
-    mylog.debug( 'Sending image to %04i' % outgoing_rank)
+    mylog.debug('Sending image to %04i' % outgoing_rank)
     comm.send_array(image, outgoing_rank, tag=comm.rank)
 
+
 def scatter_image(comm, root, image):
-    mylog.debug( 'Scattering from %04i' % root)
+    mylog.debug('Scattering from %04i' % root)
     image = comm.mpi_bcast(image, root=root)
     return image
-
-def find_node(node, pos):
-    """
-    Find the AMRKDTree node enclosing a position
-    """
-    assert(np.all(node.left_edge <= pos))
-    assert(np.all(node.right_edge > pos))
-    while not kd_is_leaf(node):
-        if pos[node.split.dim] < node.split.pos:
-            node = node.left
-        else:
-            node = node.right
-    return node
-

diff -r 62e723e2f60c980f48fca5f76f5fdd8862945b98 -r a5f16d327d7e784257019a85bad89437f85ef74d yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -26,10 +26,13 @@
 from yt.funcs import *
 import numpy as np
 import h5py
-from amr_kdtools import Node, Split, kd_is_leaf, kd_sum_volume, kd_node_check, \
-        depth_traverse, viewpoint_traverse, add_grids, \
-        receive_and_reduce, send_to_parent, scatter_image, find_node, \
-        depth_first_touch, add_grid
+from amr_kdtools import \
+        receive_and_reduce, send_to_parent, scatter_image
+
+from yt.utilities.lib.amr_kdtools import Node, add_pygrids, find_node, \
+        kd_is_leaf, depth_traverse, depth_first_touch, viewpoint_traverse, \
+        kd_traverse, \
+        get_left_edge, get_right_edge, kd_sum_volume, kd_node_check
 from yt.utilities.parallel_tools.parallel_analysis_interface \
     import ParallelAnalysisInterface 
 from yt.utilities.lib.grid_traversal import PartitionedGrid
@@ -67,12 +70,11 @@
         self.comm_rank = comm_rank
         self.comm_size = comm_size
         self.trunk = Node(None, None, None,
-                left, right, None, 1)
+                left, right, -1, 1)
         if grids is None:
-            self.grids = pf.h.region((left+right)/2., left, right)._grids
-        else:
-            self.grids = grids
-        self.build(grids)
+            grids = pf.h.region((left+right)/2., left, right)._grids
+        self.grids = grids
+        self.build(self.grids)
 
     def add_grids(self, grids):
         lvl_range = range(self.min_level, self.max_level+1)
@@ -91,7 +93,8 @@
                     gles = np.array([g.LeftEdge for g in grids])[gmask]
                     gres = np.array([g.RightEdge for g in grids])[gmask]
                     gids = np.array([g.id for g in grids])[gmask]
-                    add_grids(self.trunk, gles, gres, gids, self.comm_rank,
+                    add_pygrids(self.trunk, gids.size, gles, gres, gids, 
+                              self.comm_rank,
                               self.comm_size)
                     grids_added += grids.size
                     del gles, gres, gids, grids
@@ -99,31 +102,35 @@
                     grids_added += grids.size
                     [add_grid(self.trunk, g.LeftEdge, g.RightEdge, g.id,
                               self.comm_rank, self.comm_size) for g in grids]
-        else:
-            gles = np.array([g.LeftEdge for g in grids])
-            gres = np.array([g.RightEdge for g in grids])
-            gids = np.array([g.id for g in grids])
+            return
 
-            add_grids(self.trunk, gles, gres, gids, self.comm_rank, self.comm_size)
-            del gles, gres, gids, grids
+        for lvl in lvl_range:
+            gles = np.array([g.LeftEdge for g in grids if g.Level == lvl])
+            gres = np.array([g.RightEdge for g in grids if g.Level == lvl])
+            gids = np.array([g.id for g in grids if g.Level == lvl])
 
+            add_pygrids(self.trunk, len(gids), gles, gres, gids, self.comm_rank, self.comm_size)
+            del gles, gres, gids
 
-    def build(self, grids = None):
+
+    def build(self, grids=None):
         self.add_grids(grids)
 
     def check_tree(self):
-        for node in depth_traverse(self):
-            if node.grid is None:
+        for node in depth_traverse(self.trunk):
+            if node.grid == -1:
                 continue
             grid = self.pf.h.grids[node.grid - self._id_offset]
             dds = grid.dds
             gle = grid.LeftEdge
             gre = grid.RightEdge
-            li = np.rint((node.left_edge-gle)/dds).astype('int32')
-            ri = np.rint((node.right_edge-gle)/dds).astype('int32')
+            nle = get_left_edge(node)
+            nre = get_right_edge(node)
+            li = np.rint((nle-gle)/dds).astype('int32')
+            ri = np.rint((nre-gle)/dds).astype('int32')
             dims = (ri - li).astype('int32')
-            assert(np.all(grid.LeftEdge <= node.left_edge))
-            assert(np.all(grid.RightEdge >= node.right_edge))
+            assert(np.all(grid.LeftEdge <= nle))
+            assert(np.all(grid.RightEdge >= nre))
             assert(np.all(dims > 0))
             # print grid, dims, li, ri
 
@@ -134,19 +141,20 @@
 
     def sum_cells(self, all_cells=False):
         cells = 0
-        for node in depth_traverse(self):
-            if node.grid is None:
+        for node in depth_traverse(self.trunk):
+            if node.grid == -1:
                 continue
             if not all_cells and not kd_is_leaf(node):
                 continue
             grid = self.pf.h.grids[node.grid - self._id_offset]
             dds = grid.dds
             gle = grid.LeftEdge
-            li = np.rint((node.left_edge-gle)/dds).astype('int32')
-            ri = np.rint((node.right_edge-gle)/dds).astype('int32')
+            nle = get_left_edge(node)
+            nre = get_right_edge(node)
+            li = np.rint((nle-gle)/dds).astype('int32')
+            ri = np.rint((nre-gle)/dds).astype('int32')
             dims = (ri - li).astype('int32')
             cells += np.prod(dims)
-
         return cells
 
 class AMRKDTree(ParallelAnalysisInterface):
@@ -204,14 +212,8 @@
         self._initialized = True
 
     def traverse(self, viewpoint=None):
-        if viewpoint is None:
-            for node in depth_traverse(self.tree):
-                if kd_is_leaf(node) and node.grid is not None:
-                    yield self.get_brick_data(node)
-        else:
-            for node in viewpoint_traverse(self.tree, viewpoint):
-                if kd_is_leaf(node) and node.grid is not None:
-                    yield self.get_brick_data(node)
+        for node in kd_traverse(self.tree.trunk, viewpoint=viewpoint):
+            yield self.get_brick_data(node)
 
     def get_node(self, nodeid):
         path = np.binary_repr(nodeid)
@@ -232,13 +234,13 @@
         owners = {}
         for bottom_id in range(self.comm.size, 2*self.comm.size):
             temp = self.get_node(bottom_id)
-            owners[temp.id] = temp.id - self.comm.size
+            owners[temp.node_id] = temp.node_id - self.comm.size
             while temp is not None:
                 if temp.parent is None: break
                 if temp == temp.parent.right:
                     break
                 temp = temp.parent
-                owners[temp.id] = owners[temp.left.id]
+                owners[temp.node_id] = owners[temp.left.node_id]
         return owners
 
     def reduce_tree_images(self, image, viewpoint):
@@ -248,33 +250,32 @@
         owners = self.get_reduce_owners()
         node = self.get_node(nprocs + myrank)
 
-        while True:
-            if owners[node.parent.id] == myrank:
-                split = node.parent.split
-                left_in_front = viewpoint[split.dim] < node.parent.split.pos
-                #add_to_front = (left_in_front == (node == node.parent.right))
-                add_to_front = not left_in_front
-                image = receive_and_reduce(self.comm, owners[node.parent.right.id],
-                                  image, add_to_front)
-                if node.parent.id == 1: break
-                else: node = node.parent
-            else:
-                send_to_parent(self.comm, owners[node.parent.id], image)
-                break
-        image = scatter_image(self.comm, owners[1], image)
-        return image
+        while owners[node.parent.node_id] == myrank:
+            split_dim = node.parent.get_split_dim()
+            split_pos = node.parent.get_split_pos()
+            add_to_front = viewpoint[split_dim] >= split_pos
+            image = receive_and_reduce(self.comm,
+                                       owners[node.parent.right.node_id],
+                                       image, add_to_front)
+            if node.parent.node_id == 1: break
+            else: node = node.parent
+        else:
+            send_to_parent(self.comm, owners[node.parent.node_id], image)
+
+        return scatter_image(self.comm, owners[1], image)
 
     def get_brick_data(self, node):
         if node.data is not None: return node.data
         grid = self.pf.h.grids[node.grid - self._id_offset]
         dds = grid.dds
         gle = grid.LeftEdge
-        gre = grid.RightEdge
-        li = np.rint((node.left_edge-gle)/dds).astype('int32')
-        ri = np.rint((node.right_edge-gle)/dds).astype('int32')
+        nle = get_left_edge(node)
+        nre = get_right_edge(node)
+        li = np.rint((nle-gle)/dds).astype('int32')
+        ri = np.rint((nre-gle)/dds).astype('int32')
         dims = (ri - li).astype('int32')
-        assert(np.all(grid.LeftEdge <= node.left_edge))
-        assert(np.all(grid.RightEdge >= node.right_edge))
+        assert(np.all(grid.LeftEdge <= nle))
+        assert(np.all(grid.RightEdge >= nre))
 
         if grid in self.current_saved_grids:
             dds = self.current_vcds[self.current_saved_grids.index(grid)]
@@ -292,8 +293,8 @@
                   li[2]:ri[2]+1].copy() for d in dds]
 
         brick = PartitionedGrid(grid.id, data,
-                                node.left_edge.copy(),
-                                node.right_edge.copy(),
+                                nle.copy(),
+                                nre.copy(),
                                 dims.astype('int64'))
         node.data = brick
         if not self._initialized: self.brick_dimensions.append(dims)
@@ -405,7 +406,7 @@
             self.comm.recv_array(self.comm.rank-1, tag=self.comm.rank-1)
         f = h5py.File(fn,'w')
         for node in depth_traverse(self.tree):
-            i = node.id
+            i = node.node_id
             if node.data is not None:
                 for fi,field in enumerate(self.fields):
                     try:
@@ -426,8 +427,8 @@
         try:
             f = h5py.File(fn,"a")
             for node in depth_traverse(self.tree):
-                i = node.id
-                if node.grid is not None:
+                i = node.node_id
+                if node.grid != -1:
                     data = [f["brick_%s_%s" %
                               (hex(i), field)][:].astype('float64') for field in self.fields]
                     node.data = PartitionedGrid(node.grid.id, data,
@@ -476,32 +477,28 @@
         gridids = []
         splitdims = []
         splitposs = []
-        for node in depth_first_touch(self.tree):
-            nids.append(node.id) 
-            les.append(node.left_edge) 
-            res.append(node.right_edge) 
+        for node in depth_first_touch(self.tree.trunk):
+            nids.append(node.node_id) 
+            les.append(node.get_left_edge()) 
+            res.append(node.get_right_edge()) 
             if node.left is None:
                 leftids.append(-1) 
             else:
-                leftids.append(node.left.id) 
+                leftids.append(node.left.node_id) 
             if node.right is None:
                 rightids.append(-1) 
             else:
-                rightids.append(node.right.id) 
+                rightids.append(node.right.node_id) 
             if node.parent is None:
                 parentids.append(-1) 
             else:
-                parentids.append(node.parent.id) 
+                parentids.append(node.parent.node_id) 
             if node.grid is None:
                 gridids.append(-1) 
             else:
                 gridids.append(node.grid) 
-            if node.split is None:
-                splitdims.append(-1)
-                splitposs.append(np.nan)
-            else:
-                splitdims.append(node.split.dim)
-                splitposs.append(node.split.pos)
+            splitdims.append(node.get_split_dim())
+            splitposs.append(node.get_split_pos())
 
         return nids, parentids, leftids, rightids, les, res, gridids,\
                 splitdims, splitposs
@@ -518,19 +515,23 @@
         N = nids.shape[0]
         for i in xrange(N):
             n = self.get_node(nids[i])
-            n.left_edge = les[i]
-            n.right_edge = res[i]
+            n.set_left_edge(les[i])
+            n.set_right_edge(res[i])
             if lids[i] != -1 and n.left is None:
-                n.left = Node(n, None, None, None,  
-                                      None, None, lids[i])
+                n.left = Node(n, None, None, 
+                              np.zeros(3, dtype='float64'),  
+                              np.zeros(3, dtype='float64'),  
+                              -1, lids[i])
             if rids[i] != -1 and n.right is None:
-                n.right = Node(n, None, None, None, 
-                                      None, None, rids[i])
+                n.right = Node(n, None, None, 
+                               np.zeros(3, dtype='float64'),  
+                               np.zeros(3, dtype='float64'),  
+                               -1, rids[i])
             if gids[i] != -1:
                 n.grid = gids[i]
 
             if splitdims[i] != -1:
-                n.split = Split(splitdims[i], splitposs[i])
+                n.create_split(splitdims[i], splitposs[i])
 
         mylog.info('AMRKDTree rebuilt, Final Volume: %e' % kd_sum_volume(self.tree.trunk))
         return self.tree.trunk

diff -r 62e723e2f60c980f48fca5f76f5fdd8862945b98 -r a5f16d327d7e784257019a85bad89437f85ef74d yt/utilities/lib/amr_kdtools.pyx
--- /dev/null
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -0,0 +1,921 @@
+"""
+AMR kD-Tree Cython Tools
+
+Authors: Samuel Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
+
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2013 Samuel Skillman.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+cimport numpy as np
+cimport cython
+from libc.stdlib cimport malloc, free
+
+cdef extern from "stdlib.h":
+    # NOTE that size_t might not be int
+    void *alloca(int)
+
+
+DEF Nch = 4
+
+cdef struct Split:
+    int dim
+    np.float64_t pos
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
+cdef class Node:
+    """A node of the AMR kd-tree.
+
+    Holds tree links (parent/left/right), the owning grid id (-1 when the
+    node has no grid), a 64-bit heap-style node id, C-level left/right
+    edge arrays, an arbitrary data payload, and a heap-allocated Split
+    describing how the node was divided (NULL for leaves).
+    """
+
+    cdef public Node left
+    cdef public Node right
+    cdef public Node parent
+    cdef public int grid
+    cdef public np.int64_t node_id
+    cdef np.float64_t left_edge[3]
+    cdef np.float64_t right_edge[3]
+    cdef public data
+    cdef Split * split
+
+    def __cinit__(self, 
+                  Node parent, 
+                  Node left, 
+                  Node right, 
+                  np.ndarray[np.float64_t, ndim=1] left_edge,
+                  np.ndarray[np.float64_t, ndim=1] right_edge,
+                  int grid,
+                  np.int64_t node_id):
+        self.left = left
+        self.right = right
+        self.parent = parent
+        cdef int i
+        for i in range(3):
+            self.left_edge[i] = left_edge[i]
+            self.right_edge[i] = right_edge[i]
+        self.grid = grid
+        self.node_id = node_id
+        # BUG FIX: this previously read `self.split == NULL`, a no-op
+        # comparison.  Assign so the pointer is explicitly initialized.
+        self.split = NULL
+
+    def print_me(self):
+        # Debugging helper: dump id, edges, and grid id to stdout.
+        print 'Node %i' % self.node_id
+        print '\t le: %e %e %e' % (self.left_edge[0], self.left_edge[1], 
+                                   self.left_edge[2])
+        print '\t re: %e %e %e' % (self.right_edge[0], self.right_edge[1], 
+                                   self.right_edge[2])
+        print '\t grid: %i' % self.grid
+
+    def get_split_dim(self):
+        # -1 signals "no split" (leaf or dead node).
+        if self.split != NULL:
+            return self.split.dim
+        else:
+            return -1
+    
+    def get_split_pos(self):
+        # NaN signals "no split".
+        if self.split != NULL:
+            return self.split.pos
+        else:
+            return np.nan
+
+    def get_left_edge(self):
+        return get_left_edge(self)
+    
+    def get_right_edge(self):
+        return get_right_edge(self)
+
+    def set_left_edge(self, np.ndarray[np.float64_t, ndim=1] left_edge):
+        cdef int i
+        for i in range(3):
+            self.left_edge[i] = left_edge[i]
+    
+    def set_right_edge(self, np.ndarray[np.float64_t, ndim=1] right_edge):
+        cdef int i
+        for i in range(3):
+            self.right_edge[i] = right_edge[i]
+
+    def create_split(self, dim, pos):
+        # BUG FIX: free any previously attached split first so repeated
+        # calls (e.g. during tree rebuild) do not leak the old allocation.
+        if self.split != NULL:
+            free(self.split)
+            self.split = NULL
+        split = <Split *> malloc(sizeof(Split))
+        split.dim = dim 
+        split.pos = pos
+        self.split = split
+
+    def __dealloc__(self):
+        if self.split != NULL: free(self.split)
+
+def get_left_edge(Node node):
+    """Return *node*'s left edge as a freshly allocated (3,) float64 array."""
+    return np.array([node.left_edge[0],
+                     node.left_edge[1],
+                     node.left_edge[2]], dtype='float64')
+
+def get_right_edge(Node node):
+    """Return *node*'s right edge as a freshly allocated (3,) float64 array."""
+    return np.array([node.right_edge[0],
+                     node.right_edge[1],
+                     node.right_edge[2]], dtype='float64')
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
+cdef inline np.int64_t _lchild_id(np.int64_t nid):
+    # Heap-style indexing: the left child of node i is 2*i.
+    return nid << 1
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
+cdef inline np.int64_t _rchild_id(np.int64_t nid):
+    # Heap-style indexing: the right child of node i is 2*i + 1.
+    return (nid << 1) + 1
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
+cdef inline np.int64_t _parent_id(np.int64_t nid):
+    # Heap-style indexing: the parent of node i is (i - 1) >> 1.
+    return (nid - 1) >> 1
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
+cdef int should_i_build(Node node, int rank, int size):
+    # Nodes above the parallel band (id < size) and below it (id >= 2*size)
+    # are built by every rank; inside the band each rank builds only the
+    # node it owns (id - size == rank).
+    if node.node_id < size or node.node_id >= 2*size:
+        return 1
+    return 1 if node.node_id - size == rank else 0
+
+def kd_traverse(Node trunk, viewpoint=None):
+    """Yield leaf nodes that own a grid, in depth-first order, or in
+    viewpoint-dependent order when *viewpoint* is given."""
+    if viewpoint is None:
+        walker = depth_traverse(trunk)
+    else:
+        walker = viewpoint_traverse(trunk, viewpoint)
+    for node in walker:
+        if kd_is_leaf(node) and node.grid != -1:
+            yield node
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
+cdef add_grid(Node node, 
+                   np.float64_t *gle, 
+                   np.float64_t *gre, 
+                   int gid, 
+                   int rank,
+                   int size):
+    # Recursively place grid `gid` (edges gle/gre) into the subtree rooted
+    # at `node`, descending only into children the grid overlaps.
+
+    if not should_i_build(node, rank, size):
+        return
+
+    if kd_is_leaf(node):
+        insert_grid(node, gle, gre, gid, rank, size)
+    else:
+        # Non-leaf nodes always carry a split (set by divide()), so the
+        # dereference of node.split here is safe.
+        less_id = gle[node.split.dim] < node.split.pos
+        if less_id:
+            add_grid(node.left, gle, gre,
+                     gid, rank, size)
+
+        greater_id = gre[node.split.dim] > node.split.pos
+        if greater_id:
+            add_grid(node.right, gle, gre,
+                     gid, rank, size)
+    return
+
+def add_pygrid(Node node, 
+                   np.ndarray[np.float64_t, ndim=1] gle, 
+                   np.ndarray[np.float64_t, ndim=1] gre, 
+                   int gid, 
+                   int rank,
+                   int size):
+
+    """
+    The entire purpose of this function is to move everything from ndarrays
+    to internal C pointers. 
+    """
+    # alloca'd buffers live for the duration of this call, which covers the
+    # whole recursive add_grid() descent below.
+    pgles = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+    pgres = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+    cdef int j
+    for j in range(3):
+        pgles[j] = gle[j]
+        pgres[j] = gre[j]
+
+    add_grid(node, pgles, pgres, gid, rank, size)
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
+cdef insert_grid(Node node, 
+                np.float64_t *gle, 
+                np.float64_t *gre, 
+                int grid_id, 
+                int rank,
+                int size):
+    # Attach a single grid to a leaf: either claim the node outright when
+    # the grid covers it, or split the node and recurse.
+    if not should_i_build(node, rank, size):
+        return
+
+    # If we should continue to split based on parallelism, do so!
+    if should_i_split(node, rank, size):
+        geo_split(node, gle, gre, grid_id, rank, size)
+        return
+
+    # contained == 1 iff the grid fully covers this node's extent.
+    cdef int contained = 1
+    for i in range(3):
+        if gle[i] > node.left_edge[i] or\
+           gre[i] < node.right_edge[i]:
+            contained *= 0
+
+    if contained == 1:
+        node.grid = grid_id 
+        assert(node.grid != -1)
+        return
+
+    # Split the grid
+    cdef int check = split_grid(node, gle, gre, grid_id, rank, size)
+    # If check is -1, then we have found a place where there are no choices.
+    # Exit out and set the node to None.
+    if check == -1:
+        node.grid = -1 
+    return
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
+def add_pygrids(Node node, 
+                    int ngrids,
+                    np.ndarray[np.float64_t, ndim=2] gles, 
+                    np.ndarray[np.float64_t, ndim=2] gres, 
+                    np.ndarray[np.int64_t, ndim=1] gids, 
+                    int rank,
+                    int size):
+    """
+    The entire purpose of this function is to move everything from ndarrays
+    to internal C pointers. 
+    """
+    # NOTE(review): alloca inside the loop grows the stack with ngrids
+    # (6 doubles per grid); fine for typical grid counts, but worth
+    # confirming for very large hierarchies.
+    pgles = <np.float64_t **> alloca(ngrids * sizeof(np.float64_t*))
+    pgres = <np.float64_t **> alloca(ngrids * sizeof(np.float64_t*))
+    pgids = <np.int64_t *> alloca(ngrids * sizeof(np.int64_t))
+    for i in range(ngrids):
+        pgles[i] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+        pgres[i] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+        pgids[i] = gids[i]
+        for j in range(3):
+            pgles[i][j] = gles[i, j]
+            pgres[i][j] = gres[i, j]
+
+    add_grids(node, ngrids, pgles, pgres, pgids, rank, size)
+
+
+ 
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
+cdef add_grids(Node node, 
+                    int ngrids,
+                    np.float64_t **gles, 
+                    np.float64_t **gres, 
+                    np.int64_t *gids, 
+                    int rank,
+                    int size):
+    # Recursively distribute a batch of grids over the subtree at `node`,
+    # partitioning them into "less"/"greater" sets about the node's split.
+    # A grid may land in both sets when it straddles the split plane.
+    cdef int i, j, nless, ngreater
+    cdef np.int64_t gid
+    if not should_i_build(node, rank, size):
+        return
+
+    if kd_is_leaf(node):
+        insert_grids(node, ngrids, gles, gres, gids, rank, size)
+        return
+
+    # Scratch index arrays: position i holds the input index of the i-th
+    # grid falling on that side of the split.
+    less_ids= <np.int64_t *> malloc(ngrids * sizeof(np.int64_t))
+    greater_ids = <np.int64_t *> malloc(ngrids * sizeof(np.int64_t))
+   
+    nless = 0
+    ngreater = 0
+    for i in range(ngrids):
+        if gles[i][node.split.dim] < node.split.pos:
+            less_ids[nless] = i
+            nless += 1
+            
+        if gres[i][node.split.dim] > node.split.pos:
+            greater_ids[ngreater] = i
+            ngreater += 1
+
+    #print 'nless: %i' % nless
+    #print 'ngreater: %i' % ngreater
+
+    # Deep copies of the per-side edge data; everything malloc'd here is
+    # freed before returning (recursion copies again at the next level).
+    less_gles = <np.float64_t **> malloc(nless * sizeof(np.float64_t*))
+    less_gres = <np.float64_t **> malloc(nless * sizeof(np.float64_t*))
+    l_ids = <np.int64_t *> malloc(nless * sizeof(np.int64_t))
+    for i in range(nless):
+        less_gles[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        less_gres[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+
+    greater_gles = <np.float64_t **> malloc(ngreater * sizeof(np.float64_t*))
+    greater_gres = <np.float64_t **> malloc(ngreater * sizeof(np.float64_t*))
+    g_ids = <np.int64_t *> malloc(ngreater * sizeof(np.int64_t))
+    for i in range(ngreater):
+        greater_gles[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        greater_gres[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+
+    cdef int index
+    for i in range(nless):
+        index = less_ids[i]
+        l_ids[i] = gids[index]
+        for j in range(3):
+            less_gles[i][j] = gles[index][j]
+            less_gres[i][j] = gres[index][j]
+
+    if nless > 0:
+        add_grids(node.left, nless, less_gles, less_gres,
+                  l_ids, rank, size)
+            
+    for i in range(ngreater):
+        index = greater_ids[i]
+        g_ids[i] = gids[index]
+        for j in range(3):
+            greater_gles[i][j] = gles[index][j]
+            greater_gres[i][j] = gres[index][j]
+
+    if ngreater > 0:
+        add_grids(node.right, ngreater, greater_gles, greater_gres,
+                  g_ids, rank, size)
+
+    # Release every allocation made above, inner arrays first.
+    for i in range(nless):
+        free(less_gles[i])
+        free(less_gres[i])
+    free(l_ids)
+    free(less_ids)
+    free(less_gles)
+    free(less_gres)
+    for i in range(ngreater):
+        free(greater_gles[i])
+        free(greater_gres[i])
+    free(g_ids)
+    free(greater_ids)
+    free(greater_gles)
+    free(greater_gres)
+
+    return
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
+cdef int should_i_split(Node node, int rank, int size):
+    # Keep splitting for parallelism while the node id lies in (0, size).
+    return 1 if 0 < node.node_id < size else 0
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
+cdef void insert_grids(Node node, 
+                       int ngrids,
+                       np.float64_t **gles, 
+                       np.float64_t **gres, 
+                       np.int64_t *gids, 
+                       int rank,
+                       int size):
+    # Batch analogue of insert_grid(): claim the node when a single grid
+    # covers it entirely, otherwise split and distribute the batch.
+    
+    if not should_i_build(node, rank, size) or ngrids == 0:
+        return
+    cdef int contained = 1
+    cdef int check
+
+    if ngrids == 1:
+        # If we should continue to split based on parallelism, do so!
+        if should_i_split(node, rank, size):
+            geo_split(node, gles[0], gres[0], gids[0], rank, size)
+            return
+
+        # contained == 1 iff the lone grid fully covers this node's extent.
+        for i in range(3):
+            contained *= gles[0][i] <= node.left_edge[i]
+            contained *= gres[0][i] >= node.right_edge[i]
+    
+        if contained == 1:
+            # print 'Node fully contained, setting to grid: %i' % gids[0]
+            node.grid = gids[0]
+            assert(node.grid != -1)
+            return
+
+    # Split the grids
+    check = split_grids(node, ngrids, gles, gres, gids, rank, size)
+    # If check is -1, then we have found a place where there are no choices.
+    # Exit out and set the node to None.
+    if check == -1:
+        node.grid = -1
+    return
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
+cdef split_grid(Node node, 
+               np.float64_t *gle, 
+               np.float64_t *gre, 
+               int gid, 
+               int rank,
+               int size):
+    # Choose a split plane for a single grid, divide the node, and recurse
+    # into whichever child the grid overlaps.  Returns 0 on success, -1
+    # when no valid split position exists.
+
+    # Pack the grid edges into the (n_grids, 2, 3) layout that
+    # kdtree_get_choices expects; alloca keeps it on the stack.
+    cdef int j
+    data = <np.float64_t ***> alloca(sizeof(np.float64_t**))
+    data[0] = <np.float64_t **> alloca(2 * sizeof(np.float64_t*))
+    for j in range(2):
+        data[0][j] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+    for j in range(3):
+        data[0][0][j] = gle[j]
+        data[0][1][j] = gre[j]
+
+    less_ids = <np.uint8_t *> alloca(1 * sizeof(np.uint8_t))
+    greater_ids = <np.uint8_t *> alloca(1 * sizeof(np.uint8_t))
+
+    best_dim, split_pos, nless, ngreater = \
+        kdtree_get_choices(1, data, node.left_edge, node.right_edge,
+                          less_ids, greater_ids)
+
+    # If best_dim is -1, then we have found a place where there are no choices.
+    # Exit out and set the node to None.
+    if best_dim == -1:
+        print 'Failed to split grid.'
+        return -1
+
+        
+    # Ownership of this allocation transfers to `node` inside divide();
+    # it is released by Node.__dealloc__.
+    split = <Split *> malloc(sizeof(Split))
+    split.dim = best_dim
+    split.pos = split_pos
+
+    # Create a Split
+    divide(node, split)
+
+    # Populate Left Node
+    #print 'Inserting left node', node.left_edge, node.right_edge
+    if nless == 1:
+        insert_grid(node.left, gle, gre,
+                     gid, rank, size)
+
+    # Populate Right Node
+    #print 'Inserting right node', node.left_edge, node.right_edge
+    if ngreater == 1:
+        insert_grid(node.right, gle, gre,
+                     gid, rank, size)
+
+    return 0
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
+cdef kdtree_get_choices(int n_grids, 
+                        np.float64_t ***data,
+                        np.float64_t *l_corner,
+                        np.float64_t *r_corner,
+                        np.uint8_t *less_ids,
+                        np.uint8_t *greater_ids,
+                       ):
+    # Pick the split dimension with the most unique candidate positions
+    # strictly inside (l_corner, r_corner), take the median candidate as
+    # the split position, and classify each grid as less/greater of it.
+    # Returns (best_dim, split_pos, nless, ngreater); best_dim == -1 means
+    # no valid candidate exists.
+    cdef int i, j, k, dim, n_unique, best_dim, n_best, addit, my_split
+    cdef np.float64_t **uniquedims, *uniques, split
+    uniquedims = <np.float64_t **> alloca(3 * sizeof(np.float64_t*))
+    for i in range(3):
+        uniquedims[i] = <np.float64_t *> \
+                alloca(2*n_grids * sizeof(np.float64_t))
+    my_max = 0
+    my_split = 0
+    best_dim = -1
+    for dim in range(3):
+        n_unique = 0
+        uniques = uniquedims[dim]
+        for i in range(n_grids):
+            # Check for disqualification
+            for j in range(2):
+                # print "Checking against", i,j,dim,data[i,j,dim]
+                if not (l_corner[dim] < data[i][j][dim] and
+                        data[i][j][dim] < r_corner[dim]):
+                    # print "Skipping ", data[i,j,dim], l_corner[dim], r_corner[dim]
+                    continue
+                skipit = 0
+                # Add our left ...
+                for k in range(n_unique):
+                    if uniques[k] == data[i][j][dim]:
+                        skipit = 1
+                        # print "Identified", uniques[k], data[i,j,dim], n_unique
+                        break
+                if skipit == 0:
+                    uniques[n_unique] = data[i][j][dim]
+                    n_unique += 1
+        if n_unique > my_max:
+            best_dim = dim
+            my_max = n_unique
+            my_split = (n_unique-1)/2
+    # I recognize how lame this is.
+    # NOTE(review): when no candidate exists (my_max == 0, best_dim == -1)
+    # tarr is empty and tarr[my_split] indexes out of bounds with
+    # boundscheck disabled; callers only consume best_dim in that case,
+    # but this read should be guarded -- TODO confirm.
+    cdef np.ndarray[np.float64_t, ndim=1] tarr = np.empty(my_max, dtype='float64')
+    for i in range(my_max):
+        # print "Setting tarr: ", i, uniquedims[best_dim][i]
+        tarr[i] = uniquedims[best_dim][i]
+    tarr.sort()
+    split = tarr[my_split]
+    cdef int nless=0, ngreater=0
+    for i in range(n_grids):
+        if data[i][0][best_dim] < split:
+            less_ids[i] = 1
+            nless += 1
+        else:
+            less_ids[i] = 0
+        if data[i][1][best_dim] > split:
+            greater_ids[i] = 1
+            ngreater += 1
+        else:
+            greater_ids[i] = 0
+    # Return out unique values
+    return best_dim, split, nless, ngreater
+
+#@cython.boundscheck(False)
+#@cython.wraparound(False)
+#@cython.cdivision(True)
+cdef int split_grids(Node node, 
+                       int ngrids,
+                       np.float64_t **gles, 
+                       np.float64_t **gres, 
+                       np.int64_t *gids, 
+                       int rank,
+                       int size):
+    # Batch analogue of split_grid(): choose a split from all grid edges,
+    # divide the node, and recurse with the grids on each side.  Returns
+    # 0 on success, -1 when no valid split position exists.
+    # Find a Split
+    cdef int i, j, k
+
+    # Pack edges into the (ngrids, 2, 3) layout kdtree_get_choices expects.
+    data = <np.float64_t ***> alloca(ngrids * sizeof(np.float64_t**))
+    for i in range(ngrids):
+        data[i] = <np.float64_t **> alloca(2 * sizeof(np.float64_t*))
+        for j in range(2):
+            data[i][j] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+        for j in range(3):
+            data[i][0][j] = gles[i][j]
+            data[i][1][j] = gres[i][j]
+
+    less_ids = <np.uint8_t *> alloca(ngrids * sizeof(np.uint8_t))
+    greater_ids = <np.uint8_t *> alloca(ngrids * sizeof(np.uint8_t))
+
+    best_dim, split_pos, nless, ngreater = \
+        kdtree_get_choices(ngrids, data, node.left_edge, node.right_edge,
+                          less_ids, greater_ids)
+ 
+    # If best_dim is -1, then we have found a place where there are no choices.
+    # Exit out and set the node to None.
+    if best_dim == -1:
+        print 'Failed to split grids.'
+        return -1
+
+    # Ownership of this allocation transfers to `node` inside divide();
+    # it is released by Node.__dealloc__.
+    split = <Split *> malloc(sizeof(Split))
+    split.dim = best_dim
+    split.pos = split_pos
+
+    #del data
+
+    # Create a Split
+    divide(node, split)
+
+    less_index = <np.int64_t *> malloc(ngrids * sizeof(np.int64_t))
+    greater_index = <np.int64_t *> malloc(ngrids * sizeof(np.int64_t))
+   
+    # Recount using the per-grid flags from kdtree_get_choices.
+    nless = 0
+    ngreater = 0
+    for i in range(ngrids):
+        if less_ids[i] == 1:
+            less_index[nless] = i
+            nless += 1
+
+        if greater_ids[i] == 1:
+            greater_index[ngreater] = i
+            ngreater += 1
+
+    # Deep copies of per-side edge data; all malloc'd memory is freed
+    # before returning.
+    less_gles = <np.float64_t **> malloc(nless * sizeof(np.float64_t*))
+    less_gres = <np.float64_t **> malloc(nless * sizeof(np.float64_t*))
+    l_ids = <np.int64_t *> malloc(nless * sizeof(np.int64_t))
+    for i in range(nless):
+        less_gles[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        less_gres[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+
+    greater_gles = <np.float64_t **> malloc(ngreater * sizeof(np.float64_t*))
+    greater_gres = <np.float64_t **> malloc(ngreater * sizeof(np.float64_t*))
+    g_ids = <np.int64_t *> malloc(ngreater * sizeof(np.int64_t))
+    for i in range(ngreater):
+        greater_gles[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        greater_gres[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+
+    cdef int index
+    for i in range(nless):
+        index = less_index[i]
+        l_ids[i] = gids[index]
+        for j in range(3):
+            less_gles[i][j] = gles[index][j]
+            less_gres[i][j] = gres[index][j]
+
+    if nless > 0:
+        # Populate Left Node
+        #print 'Inserting left node', node.left_edge, node.right_edge
+        insert_grids(node.left, nless, less_gles, less_gres,
+                     l_ids, rank, size)
+
+    for i in range(ngreater):
+        index = greater_index[i]
+        g_ids[i] = gids[index]
+        for j in range(3):
+            greater_gles[i][j] = gles[index][j]
+            greater_gres[i][j] = gres[index][j]
+
+    if ngreater > 0:
+        # Populate Right Node
+        #print 'Inserting right node', node.left_edge, node.right_edge
+        insert_grids(node.right, ngreater, greater_gles, greater_gres,
+                     g_ids, rank, size)
+
+    # Release every allocation made above, inner arrays first.
+    for i in range(nless):
+        free(less_gles[i])
+        free(less_gres[i])
+    free(l_ids)
+    free(less_index)
+    free(less_gles)
+    free(less_gres)
+    for i in range(ngreater):
+        free(greater_gles[i])
+        free(greater_gres[i])
+    free(g_ids)
+    free(greater_index)
+    free(greater_gles)
+    free(greater_gres)
+
+
+    return 0
+
+cdef geo_split(Node node, 
+               np.float64_t *gle, 
+               np.float64_t *gre, 
+               int grid_id, 
+               int rank, 
+               int size):
+    # Geometrically bisect the node along the grid's longest dimension
+    # (used to keep splitting for parallelism when only one grid remains).
+    cdef int big_dim = 0
+    cdef int i
+    cdef np.float64_t v, my_max = 0.0
+    
+    for i in range(3):
+        v = gre[i] - gle[i]
+        if v > my_max:
+            my_max = v
+            big_dim = i
+
+    new_pos = (gre[big_dim] + gle[big_dim])/2.
+    
+    lnew_gle = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+    lnew_gre = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+    rnew_gle = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+    rnew_gre = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+
+    for j in range(3):
+        lnew_gle[j] = gle[j]
+        lnew_gre[j] = gre[j]
+        rnew_gle[j] = gle[j]
+        rnew_gre[j] = gre[j]
+
+    # Ownership transfers to `node` inside divide(); freed by __dealloc__.
+    split = <Split *> malloc(sizeof(Split))
+    split.dim = big_dim 
+    split.pos = new_pos
+
+    # Create a Split
+    divide(node, split)
+
+    # NOTE(review): the clipping of the child grid edges at new_pos is
+    # commented out below, so both children receive the full grid extent.
+    # insert_grid clips against the child node edges, so the result looks
+    # equivalent, but confirm this is intentional.
+    #lnew_gre[big_dim] = new_pos
+    # Populate Left Node
+    #print 'Inserting left node', node.left_edge, node.right_edge
+    insert_grid(node.left, lnew_gle, lnew_gre, 
+            grid_id, rank, size)
+
+    #rnew_gle[big_dim] = new_pos 
+    # Populate Right Node
+    #print 'Inserting right node', node.left_edge, node.right_edge
+    insert_grid(node.right, rnew_gle, rnew_gre,
+            grid_id, rank, size)
+    return
+
+cdef void divide(Node node, Split * split):
+    # Attach `split` to `node` (ownership transfers; freed by the node's
+    # __dealloc__) and create the two children on either side of the plane.
+    node.split = split
+    
+    cdef np.ndarray[np.float64_t, ndim=1] le = np.zeros(3, dtype='float64')
+    cdef np.ndarray[np.float64_t, ndim=1] re = np.zeros(3, dtype='float64')
+
+    cdef int i
+    for i in range(3):
+        le[i] = node.left_edge[i]
+        re[i] = node.right_edge[i]
+    # Left child: clip the right edge at the split position.
+    re[split.dim] = split.pos
+
+    node.left = Node(node, None, None,
+                     le, re, node.grid,
+                     _lchild_id(node.node_id))
+
+    # Right child: restore the right edge and clip the left edge instead.
+    # Reusing le/re is safe because Node.__cinit__ copies element-wise.
+    re[split.dim] = node.right_edge[split.dim]
+    le[split.dim] = split.pos
+    node.right = Node(node, None, None,
+                      le, re, node.grid,
+                      _rchild_id(node.node_id))
+
+    return
+# 
+def kd_sum_volume(Node node):
+    """Recursively total the volume of leaf nodes that own a grid."""
+    cdef int i
+    cdef np.float64_t vol
+    if node.left is None and node.right is None:
+        # Leaf: contributes its box volume only when a grid is attached.
+        if node.grid == -1:
+            return 0.0
+        vol = 1.0
+        for i in range(3):
+            vol *= node.right_edge[i] - node.left_edge[i]
+        return vol
+    return kd_sum_volume(node.left) + kd_sum_volume(node.right)
+
+def kd_node_check(Node node):
+    """Recursively sum leaf volumes (via np.prod of edge differences),
+    asserting along the way that children always come in pairs."""
+    assert (node.left is None) == (node.right is None)
+    if (node.left is None) and (node.right is None):
+        if node.grid != -1:
+            # BUG FIX: `node.right_edge - node.left_edge` operated on the
+            # C-level float64[3] members, which do not support elementwise
+            # subtraction once coerced to Python.  Use the ndarray
+            # accessors instead.
+            return np.prod(get_right_edge(node) - get_left_edge(node))
+        else: return 0.0
+    else:
+        return kd_node_check(node.left)+kd_node_check(node.right)
+
+def kd_is_leaf(Node node):
+    """Return truthy if *node* has no children (left and right both None)."""
+    # BUG FIX: the flags record the *absence* of children, but were named
+    # has_l_child/has_r_child -- exactly inverted.  Behavior was correct;
+    # the names were misleading.  Also use `is None`, not `== None`.
+    cdef int no_l_child = node.left is None
+    cdef int no_r_child = node.right is None
+    # A kd-tree node must have either both children or neither.
+    assert no_l_child == no_r_child
+    return no_l_child
+
+def step_depth(Node current, Node previous):
+    '''
+    Takes a single step in the depth-first traversal
+    '''
+    # Returns the new (current, previous) pair; `previous` encodes the
+    # direction of travel (down from parent, or up from a child).
+    if kd_is_leaf(current): # At a leaf, move back up
+        previous = current
+        current = current.parent
+
+    elif current.parent is previous: # Moving down, go left first
+        previous = current
+        if current.left is not None:
+            current = current.left
+        elif current.right is not None:
+            current = current.right
+        else:
+            current = current.parent
+
+    elif current.left is previous: # Moving up from left, go right 
+        previous = current
+        if current.right is not None:
+            current = current.right
+        else:
+            current = current.parent
+
+    elif current.right is previous: # Moving up from right child, move up
+        previous = current
+        current = current.parent
+
+    return current, previous
+ 
+def depth_traverse(Node trunk, max_node=None):
+    '''
+    Yields a depth-first traversal of the kd tree always going to
+    the left child before the right.
+    '''
+    current = trunk
+    previous = None
+    if max_node is None:
+        max_node = np.inf
+    while current is not None:
+        yield current
+        current, previous = step_depth(current, previous)
+        if current is None: break
+        # Prune: past max_node, pop back up and pretend the right subtree
+        # was just visited so traversal resumes above it.
+        if current.node_id >= max_node:
+            current = current.parent
+            previous = current.right
+
+def depth_first_touch(Node tree, max_node=None):
+    '''
+    Yields a depth-first traversal of the kd tree always going to
+    the left child before the right.
+    '''
+    # Unlike depth_traverse, each node is yielded only on its first
+    # (downward) touch, i.e. not again when traversal climbs back through it.
+    current = tree
+    previous = None
+    if max_node is None:
+        max_node = np.inf
+    while current is not None:
+        if previous is None or previous.parent != current:
+            yield current
+        current, previous = step_depth(current, previous)
+        if current is None: break
+        # Prune subtrees beyond max_node, as in depth_traverse.
+        if current.node_id >= max_node:
+            current = current.parent
+            previous = current.right
+
+def breadth_traverse(Node tree):
+    '''
+    Yields a traversal of the kd tree always going to
+    the left child before the right.
+
+    NOTE(review): despite the name, this delegates to step_depth and so
+    produces the same depth-first order as depth_traverse (minus the
+    max_node pruning), not a breadth-first order -- confirm intent.
+    '''
+    current = tree
+    previous = None
+    while current is not None:
+        yield current
+        current, previous = step_depth(current, previous)
+
+
+def viewpoint_traverse(Node tree, viewpoint):
+    '''
+    Yields a viewpoint dependent traversal of the kd-tree.  Starts
+    with nodes furthest away from viewpoint.
+    '''
+    # The ordering is produced entirely by step_viewpoint, which always
+    # descends into the far child (relative to viewpoint) first.
+    current = tree
+    previous = None
+    while current is not None:
+        yield current
+        current, previous = step_viewpoint(current, previous, viewpoint)
+
+def step_viewpoint(Node current, 
+                   Node previous, 
+                   viewpoint):
+    '''
+    Takes a single step in the viewpoint based traversal.  Always
+    goes to the node furthest away from viewpoint first.
+    '''
+    # Returns the new (current, previous) pair; `previous` encodes the
+    # direction of travel, as in step_depth.
+    if kd_is_leaf(current): # At a leaf, move back up
+        previous = current
+        current = current.parent
+    elif current.split == NULL: # This is a dead node; move back up
+        # BUG FIX: previously `current.split.dim is None`, which both
+        # dereferenced the pointer before checking it and compared a C int
+        # to None (always False), leaving this branch dead code.
+        previous = current
+        current = current.parent
+
+    elif current.parent is previous: # Moving down
+        previous = current
+        if viewpoint[current.split.dim] <= current.split.pos:
+            if current.right is not None:
+                current = current.right
+            else:
+                previous = current.right
+        else:
+            if current.left is not None:
+                current = current.left
+            else:
+                previous = current.left
+
+    elif current.right is previous: # Moving up from right 
+        previous = current
+        if viewpoint[current.split.dim] <= current.split.pos:
+            if current.left is not None:
+                current = current.left
+            else:
+                current = current.parent
+        else:
+            current = current.parent
+
+    elif current.left is previous: # Moving up from left child
+        previous = current
+        if viewpoint[current.split.dim] > current.split.pos:
+            if current.right is not None:
+                current = current.right
+            else:
+                current = current.parent
+        else:
+            current = current.parent
+
+    return current, previous
+
+cdef int point_in_node(Node node, 
+                       np.ndarray[np.float64_t, ndim=1] point):
+    # Return 1 iff point lies within [left_edge, right_edge) of the node.
+    cdef int i
+    for i in range(3):
+        if point[i] < node.left_edge[i] or point[i] >= node.right_edge[i]:
+            return 0
+    return 1
+
+
+def find_node(Node node,
+              np.ndarray[np.float64_t, ndim=1] point):
+    """
+    Find the AMRKDTree node enclosing a position
+    """
+    assert(point_in_node(node, point))
+    # Descend from `node`: every non-leaf carries a split (set by divide),
+    # so the walk is well defined until a leaf is reached.
+    while not kd_is_leaf(node):
+        if point[node.split.dim] < node.split.pos:
+            node = node.left
+        else:
+            node = node.right
+    return node
+
+

diff -r 62e723e2f60c980f48fca5f76f5fdd8862945b98 -r a5f16d327d7e784257019a85bad89437f85ef74d yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -249,6 +249,9 @@
     config.add_extension("GridTree", 
     ["yt/utilities/lib/GridTree.pyx"],
         libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
+    config.add_extension("amr_kdtools", 
+                         ["yt/utilities/lib/amr_kdtools.pyx"],
+                         libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
     config.add_subpackage("tests")
 
     if os.environ.get("GPERFTOOLS", "no").upper() != "NO":

diff -r 62e723e2f60c980f48fca5f76f5fdd8862945b98 -r a5f16d327d7e784257019a85bad89437f85ef74d yt/utilities/tests/test_amr_kdtree.py
--- a/yt/utilities/tests/test_amr_kdtree.py
+++ b/yt/utilities/tests/test_amr_kdtree.py
@@ -24,7 +24,8 @@
 """
 
 from yt.utilities.amr_kdtree.api import AMRKDTree
-from yt.utilities.amr_kdtree.amr_kdtools import depth_traverse
+from yt.utilities.lib.amr_kdtools import depth_traverse, \
+        get_left_edge, get_right_edge
 import yt.utilities.initial_conditions as ic
 import yt.utilities.flagging_methods as fm
 from yt.frontends.stream.api import load_uniform_grid, refine_amr
@@ -53,17 +54,19 @@
 
     # This largely reproduces the AMRKDTree.tree.check_tree() functionality
     tree_ok = True
-    for node in depth_traverse(kd.tree):
+    for node in depth_traverse(kd.tree.trunk):
         if node.grid is None:
             continue
         grid = pf.h.grids[node.grid - kd._id_offset]
         dds = grid.dds
         gle = grid.LeftEdge
-        li = np.rint((node.left_edge-gle)/dds).astype('int32')
-        ri = np.rint((node.right_edge-gle)/dds).astype('int32')
+        nle = get_left_edge(node)
+        nre = get_right_edge(node)
+        li = np.rint((nle-gle)/dds).astype('int32')
+        ri = np.rint((nre-gle)/dds).astype('int32')
         dims = (ri - li).astype('int32')
-        tree_ok *= np.all(grid.LeftEdge <= node.left_edge)
-        tree_ok *= np.all(grid.RightEdge >= node.right_edge)
+        tree_ok *= np.all(grid.LeftEdge <= nle)
+        tree_ok *= np.all(grid.RightEdge >= nre)
         tree_ok *= np.all(dims > 0)
 
     yield assert_equal, True, tree_ok

diff -r 62e723e2f60c980f48fca5f76f5fdd8862945b98 -r a5f16d327d7e784257019a85bad89437f85ef74d yt/visualization/streamlines.py
--- a/yt/visualization/streamlines.py
+++ b/yt/visualization/streamlines.py
@@ -169,8 +169,8 @@
                    np.any(stream[-step+1,:] >= self.pf.domain_right_edge):
                 return 0
 
-            if np.any(stream[-step+1,:] < node.left_edge) | \
-                   np.any(stream[-step+1,:] >= node.right_edge):
+            if np.any(stream[-step+1,:] < node.get_left_edge()) | \
+                   np.any(stream[-step+1,:] >= node.get_right_edge()):
                 return step-1
             step -= 1
         return step

diff -r 62e723e2f60c980f48fca5f76f5fdd8862945b98 -r a5f16d327d7e784257019a85bad89437f85ef74d yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -1130,19 +1130,22 @@
                     if np.any(np.isnan(data)):
                         raise RuntimeError
 
-        view_pos = self.front_center
         for brick in self.volume.traverse(self.front_center):
             sampler(brick, num_threads=num_threads)
             total_cells += np.prod(brick.my_data[0].shape)
             pbar.update(total_cells)
 
         pbar.finish()
-        image = sampler.aimage
-        self.finalize_image(image)
+        image = self.finalize_image(sampler.aimage)
         return image
 
     def finalize_image(self, image):
+        view_pos = self.front_center
         image.shape = self.resolution[0], self.resolution[0], 4
+        image = self.volume.reduce_tree_images(image, view_pos)
+        if self.transfer_function.grey_opacity is False:
+            image[:,:,3]=1.0
+        return image
 
 def corners(left_edge, right_edge):
     return np.array([

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.



More information about the yt-svn mailing list